ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
48
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 13
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 78
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 93
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
02d4b049-ec00-4653-bf8f-7bb9bd5e4cf6 | cpp | google/tensorstore | http_request | tensorstore/internal/http/http_request.cc | tensorstore/internal/http/http_request_test.cc | #include "tensorstore/internal/http/http_request.h"
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
namespace tensorstore {
namespace internal_http {
/// Formats an HTTP "Range" header line for `byte_range`, or returns
/// `std::nullopt` when no header is needed (the request covers the whole
/// object).
std::optional<std::string> FormatRangeHeader(
    OptionalByteRangeRequest byte_range) {
  assert(byte_range.SatisfiesInvariants());
  std::optional<std::string> header;
  if (byte_range.IsRange() &&
      byte_range.exclusive_max > byte_range.inclusive_min) {
    // HTTP byte ranges are inclusive at both ends, hence the -1.
    header = absl::StrFormat("Range: bytes=%d-%d", byte_range.inclusive_min,
                             byte_range.exclusive_max - 1);
  } else if (byte_range.IsSuffix()) {
    // Open-ended range starting at inclusive_min.
    header = absl::StrFormat("Range: bytes=%d-", byte_range.inclusive_min);
  } else if (byte_range.IsSuffixLength()) {
    // inclusive_min is negative in the suffix-length case, yielding
    // "bytes=-N" (last N bytes), as exercised by the unit test.
    header = absl::StrFormat("Range: bytes=%d", byte_range.inclusive_min);
  }
  return header;
}
/// Returns a "cache-control" header line bounding cache age by `max_age`,
/// or `std::nullopt` when no header should be attached (infinite max age).
std::optional<std::string> FormatCacheControlMaxAgeHeader(
    absl::Duration max_age) {
  if (max_age >= absl::InfiniteDuration()) {
    return std::nullopt;
  }
  const auto seconds = absl::ToInt64Seconds(max_age);
  if (seconds <= 0) {
    // Zero or negative ages disable caching entirely.
    return "cache-control: no-cache";
  }
  return absl::StrFormat("cache-control: max-age=%d", seconds);
}
/// Formats a cache-control header limiting cached-response staleness to
/// `staleness_bound`, or `std::nullopt` when any staleness is acceptable
/// (`absl::InfinitePast()`).
std::optional<std::string> FormatStalenessBoundCacheControlHeader(
    absl::Time staleness_bound) {
  if (staleness_bound == absl::InfinitePast()) {
    // Any cached response is acceptable; no header required.
    return std::nullopt;
  }
  absl::Time now;
  absl::Duration duration = absl::ZeroDuration();
  // `now` is assigned inside the condition and only read when the bound is
  // finite and in the past; otherwise `duration` stays zero ("no-cache").
  if (staleness_bound != absl::InfiniteFuture() &&
      (now = absl::Now()) > staleness_bound) {
    // A past bound tolerates responses up to `now - staleness_bound` old.
    duration = now - staleness_bound;
  }
  return FormatCacheControlMaxAgeHeader(duration);
}
/// Constructs a builder for `method` requests against `base_url`.
///
/// `uri_encoder` percent-encodes query-parameter keys and values appended via
/// `AddQueryParameter`.
HttpRequestBuilder::HttpRequestBuilder(
    std::string_view method, std::string base_url,
    absl::FunctionRef<std::string(std::string_view)> uri_encoder)
    : uri_encoder_(uri_encoder),
      request_{std::string(method), std::move(base_url)},
      query_parameter_separator_("?") {
  assert(!request_.method.empty());
  // HTTP method tokens are expected in canonical upper case.
  assert(request_.method ==
         absl::AsciiStrToUpper(std::string_view(request_.method)));
  if (request_.url.find_last_of('?') != std::string::npos) {
    // The URL already carries a query string; join further parameters with
    // '&' instead of starting a new query with '?'.
    query_parameter_separator_ = "&";
  }
}
// Finalizes and returns the request; moves out of the builder, so the builder
// must not be reused afterwards.
HttpRequest HttpRequestBuilder::BuildRequest() { return std::move(request_); }
/// Appends `header` to the request's header list; empty strings are ignored.
HttpRequestBuilder& HttpRequestBuilder::AddHeader(std::string header) {
  if (header.empty()) return *this;
  request_.headers.push_back(std::move(header));
  return *this;
}
/// Appends the uri-encoded query parameter `key[=value]` to the request URL,
/// using '?' for the first parameter and '&' thereafter.  `key` must be
/// non-empty; an empty `value` emits the key alone.
HttpRequestBuilder& HttpRequestBuilder::AddQueryParameter(
    std::string_view key, std::string_view value) {
  assert(!key.empty());
  // Build "key" or "key=value" first, then splice it onto the URL.
  std::string parameter = uri_encoder_(key);
  if (!value.empty()) {
    absl::StrAppend(&parameter, "=", uri_encoder_(value));
  }
  absl::StrAppend(&request_.url, query_parameter_separator_, parameter);
  // Subsequent parameters join the now-present query string.
  query_parameter_separator_ = "&";
  return *this;
}
/// Marks the request as accepting compressed transfer encodings (consumed by
/// the transport, e.g. to set CURLOPT_ACCEPT_ENCODING).
HttpRequestBuilder& HttpRequestBuilder::EnableAcceptEncoding() {
  request_.accept_encoding = true;
  return *this;
}
/// Adds a "Range" header for `byte_range` when one is required; no header is
/// added when the request covers the entire object.
HttpRequestBuilder& HttpRequestBuilder::MaybeAddRangeHeader(
    OptionalByteRangeRequest byte_range) {
  // `byte_range` is a small value type passed by value; the previous
  // `std::move` here was a no-op (clang-tidy: performance-move-const-arg).
  return AddHeader(FormatRangeHeader(byte_range));
}
/// Adds a "cache-control: max-age=N" (or "no-cache") header; nothing is added
/// when `max_age` is infinite.
HttpRequestBuilder& HttpRequestBuilder::MaybeAddCacheControlMaxAgeHeader(
    absl::Duration max_age) {
  return AddHeader(FormatCacheControlMaxAgeHeader(max_age));
}
/// Adds a cache-control header derived from `staleness_bound`; nothing is
/// added when the bound is `absl::InfinitePast()`.
HttpRequestBuilder&
HttpRequestBuilder::MaybeAddStalenessBoundCacheControlHeader(
    absl::Time staleness_bound) {
  return AddHeader(FormatStalenessBoundCacheControlHeader(staleness_bound));
}
/// Adds a "host: ..." header.  When `host` is empty, the authority component
/// of the request URL (e.g. "127.0.0.1" or "localhost:1234") is used instead.
HttpRequestBuilder& HttpRequestBuilder::AddHostHeader(std::string_view host) {
  if (host.empty()) {
    host = internal::ParseGenericUri(request_.url).authority;
  }
  return AddHeader(absl::StrFormat("host: %s", host));
}
}
} | #include "tensorstore/internal/http/http_request.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/byte_range.h"
namespace {
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::testing::AnyOf;
using ::testing::ElementsAre;
TEST(HttpRequestBuilder, BuildRequest) {
auto request = HttpRequestBuilder("GET", "http:
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest();
EXPECT_EQ("http:
EXPECT_TRUE(request.accept_encoding);
EXPECT_EQ("GET", request.method);
EXPECT_THAT(request.headers, testing::ElementsAre("X-foo: bar"));
}
TEST(HttpRequestBuilder, AddCacheControlMaxAgeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::InfiniteDuration());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::ZeroDuration());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: max-age=10"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(-absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
}
TEST(HttpRequestBuilder, AddStalenessBoundCacheControlHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfinitePast());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfiniteFuture());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
const absl::Time kFutureTime = absl::Now() + absl::Minutes(525600);
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(kFutureTime);
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::Now() -
absl::Milliseconds(5900));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre(AnyOf("cache-control: max-age=4",
"cache-control: max-age=5")));
}
}
TEST(HttpRequestBuilder, MaybeAddRangeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::Suffix(1));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=1-"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::SuffixLength(5));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=-5"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest{1, 2});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("Range: bytes=1-1"));
}
}
TEST(HttpRequestBuilder, AddHostHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("host: 127.0.0.1"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader("host.header");
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: host.header"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: localhost:1234"));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_request.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_request_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e88c2c63-3fd1-4c20-8428-dd2ab61e3679 | cpp | google/tensorstore | http_header | tensorstore/internal/http/http_header.cc | tensorstore/internal/http/http_header_test.cc | #include "tensorstore/internal/http/http_header.h"
#include <stddef.h>
#include <iterator>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
namespace {
// Characters accepted in an HTTP header field name — a subset of the "tchar"
// set from RFC 7230 (note: some tchar characters such as ^ _ ` | ~ are not in
// this set).
static inline constexpr internal::AsciiSet kTChar{
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "0123456789"
    R"(!#$%&'*+-.)"};
// True if `ch` may appear in a header field name.
inline bool IsTchar(char ch) { return kTChar.Test(ch); }
// Optional whitespace (OWS per RFC 7230): space or horizontal tab.
inline bool IsOWS(char ch) { return ch == ' ' || ch == '\t'; }
}
/// Validates that `header` is a well-formed "name: value" HTTP header line.
///
/// The value may be empty and may contain TAB, printable ASCII, or high-bit
/// (Latin-1) bytes; no trailing CRLF is permitted.  Returns
/// `absl::InvalidArgumentError` with the quoted header on failure.
absl::Status ValidateHttpHeader(std::string_view header) {
  // Lazily-compiled; Latin1 mode so \x80-\xff match single bytes.
  static LazyRE2 kHeaderPattern = {
      "[!#\\$%&'*+\\-\\.\\^_`|~0-9a-zA-Z]+"
      ":"
      "[\t\x20-\x7e\x80-\xff]*",
      RE2::Latin1};
  if (!RE2::FullMatch(header, *kHeaderPattern)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid HTTP header: ", tensorstore::QuoteString(header)));
  }
  return absl::OkStatus();
}
/// Parses CRLF-terminated header lines from `data` (as delivered by an HTTP
/// header callback) into `headers`, lower-casing field names and trimming
/// optional whitespace around values.  Always returns `data.size()` (the
/// number of bytes consumed), as header callbacks require.
size_t AppendHeaderData(absl::btree_multimap<std::string, std::string>& headers,
                        std::string_view data) {
  // Only buffers ending in '\n' are parsed; anything else is just consumed.
  if (data.empty() || *data.rbegin() != '\n') return data.size();
  for (std::string_view field : absl::StrSplit(data, '\n', absl::SkipEmpty())) {
    // A well-formed line ends with '\r' (CRLF termination); stop otherwise.
    if (field.empty() || *field.rbegin() != '\r') break;
    field.remove_suffix(1);
    // Trim trailing optional whitespace.
    while (!field.empty() && IsOWS(*field.rbegin())) field.remove_suffix(1);
    if (field.empty()) continue;
    // Scan the field name: a non-empty run of tchar characters, then ':'.
    auto it = field.begin();
    for (; it != field.end() && IsTchar(*it); ++it) {
    }
    if (it == field.begin() || it == field.end() || *it != ':') {
      // Not a "name: value" line (e.g. an HTTP status line); skip it.
      continue;
    }
    // Header names are case-insensitive; normalize to lower case.
    std::string field_name = absl::AsciiStrToLower(
        std::string_view(field.data(), std::distance(field.begin(), it)));
    field.remove_prefix(field_name.size() + 1);
    // Trim leading optional whitespace from the value.
    while (!field.empty() && IsOWS(*field.begin())) field.remove_prefix(1);
    headers.emplace(std::move(field_name), std::string(field));
  }
  return data.size();
}
/// Attempts to parse a "content-range" header.
///
/// Accepts "bytes <start>-<end>/<total>", "bytes <start>-<end>/*", and
/// "bytes <start>-<end>"; returns (start, end, total) with total reported as
/// 0 when unknown, or `std::nullopt` when absent/unparseable.
std::optional<std::tuple<size_t, size_t, size_t>> TryParseContentRangeHeader(
    const absl::btree_multimap<std::string, std::string>& headers) {
  auto it = headers.find("content-range");
  if (it == headers.end()) {
    return std::nullopt;
  }
  // Form with an explicit total size.
  static LazyRE2 kContentRange1 = {R"(^bytes (\d+)-(\d+)/(\d+))"};
  // Form without a total size ("/*" or nothing); total stays 0.
  static LazyRE2 kContentRange2 = {R"(^bytes (\d+)-(\d+)(/[*])?)"};
  std::tuple<size_t, size_t, size_t> result(0, 0, 0);
  if (RE2::FullMatch(it->second, *kContentRange1, &std::get<0>(result),
                     &std::get<1>(result), &std::get<2>(result))) {
    return result;
  }
  if (RE2::FullMatch(it->second, *kContentRange2, &std::get<0>(result),
                     &std::get<1>(result))) {
    return result;
  }
  return std::nullopt;
}
/// Parses the value of `header` as a boolean, returning `std::nullopt` when
/// the header is absent or does not parse as a bool.
std::optional<bool> TryParseBoolHeader(
    const absl::btree_multimap<std::string, std::string>& headers,
    std::string_view header) {
  auto pos = headers.find(header);
  if (pos == headers.end()) return std::nullopt;
  bool parsed;
  if (!absl::SimpleAtob(pos->second, &parsed)) return std::nullopt;
  return parsed;
}
/// Determines the response content length, from "content-length" (trusted
/// only when no transfer/content encoding is in effect) or, failing that,
/// from the "content-range" bounds.  Returns `std::nullopt` when unknown.
std::optional<size_t> TryGetContentLength(
    const absl::btree_multimap<std::string, std::string>& headers) {
  std::optional<size_t> content_length;
  // "content-length" describes the on-the-wire size; with an encoding
  // applied it would not reflect the decoded payload, so ignore it then.
  if (headers.find("transfer-encoding") == headers.end() &&
      headers.find("content-encoding") == headers.end()) {
    content_length = TryParseIntHeader<size_t>(headers, "content-length");
  }
  if (!content_length) {
    // Fall back to the inclusive range bounds: length = end - start + 1.
    auto content_range = TryParseContentRangeHeader(headers);
    if (content_range) {
      content_length =
          1 + std::get<1>(*content_range) - std::get<0>(*content_range);
    }
  }
  return content_length;
}
}
} | #include "tensorstore/internal/http/http_header.h"
#include <stddef.h>
#include <optional>
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::AppendHeaderData;
using ::tensorstore::internal_http::TryParseBoolHeader;
using ::tensorstore::internal_http::TryParseContentRangeHeader;
using ::tensorstore::internal_http::TryParseIntHeader;
using ::tensorstore::internal_http::ValidateHttpHeader;
TEST(ValidateHttpHeaderTest, Valid) {
TENSORSTORE_EXPECT_OK(ValidateHttpHeader("a!#$%&'*+-.^_`|~3X: b\xfe"));
}
TEST(ValidateHttpHeaderTest, Invalid) {
EXPECT_THAT(ValidateHttpHeader("a"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ValidateHttpHeader("a: \n"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(AppendHeaderData, BadHeaders) {
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(0, AppendHeaderData(headers, ""));
EXPECT_EQ(2, AppendHeaderData(headers, "\r\n"));
EXPECT_EQ(8, AppendHeaderData(headers, "foo: bar"));
EXPECT_EQ(5, AppendHeaderData(headers, "foo\r\n"));
EXPECT_EQ(7, AppendHeaderData(headers, "fo@: \r\n"));
EXPECT_TRUE(headers.empty());
}
TEST(AppendHeaderData, GoodHeaders) {
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(10, AppendHeaderData(headers, "bar: baz\r\n"));
EXPECT_FALSE(headers.empty());
ASSERT_EQ(1, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("baz", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(6, AppendHeaderData(headers, "foo:\r\n"));
ASSERT_EQ(1, headers.count("foo"));
auto range = headers.equal_range("foo");
EXPECT_EQ("", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(16, AppendHeaderData(headers, "bAr: \t baz \t\r\n"));
ASSERT_EQ(1, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("baz", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(16, AppendHeaderData(headers, "bAr: \t one \t\r\n"));
EXPECT_EQ(10, AppendHeaderData(headers, "bar: two\r\n"));
ASSERT_EQ(2, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("one", range.first->second);
++range.first;
EXPECT_EQ("two", range.first->second);
}
}
TEST(TryParse, ContentRangeHeader) {
EXPECT_THAT(
TryParseContentRangeHeader({{"content-range", "bytes 10-20/100"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 100))));
EXPECT_THAT(TryParseContentRangeHeader({{"content-range", "bytes 10-20/*"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 0))));
EXPECT_THAT(TryParseContentRangeHeader({{"content-range", "bytes 10-20"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 0))));
EXPECT_THAT(
TryParseContentRangeHeader({{"content-range", "bytes 1-abc/100"}}),
::testing::Eq(std::nullopt));
}
TEST(TryParse, BoolHeader) {
EXPECT_THAT(TryParseBoolHeader({{"bool-header", "true"}}, "bool-header"),
::testing::Optional(testing::Eq(true)));
}
TEST(TryParse, IntHeader) {
EXPECT_THAT(TryParseIntHeader<size_t>({{"int-header", "100"}}, "int-header"),
::testing::Optional(testing::Eq(100)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_header.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_header_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
321c5dd0-c284-4e83-8f76-cc62c4ee08b6 | cpp | google/tensorstore | http_response | tensorstore/internal/http/http_response.cc | tensorstore/internal/http/http_response_test.cc | #include "tensorstore/internal/http/http_response.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "re2/re2.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
/// Maps an HTTP 4xx/5xx status code to its standard reason phrase, or
/// nullptr for codes not listed here (callers substitute "Unknown").
const char* HttpResponseCodeToMessage(const HttpResponse& response) {
  switch (response.status_code) {
    case 400:
      return "Bad Request";
    case 401:
      return "Unauthorized";
    case 402:
      return "Payment Required";
    case 403:
      return "Forbidden";
    case 404:
      return "Not Found";
    case 405:
      return "Method Not Allowed";
    case 406:
      return "Not Acceptable";
    case 407:
      return "Proxy Authentication Required";
    case 408:
      return "Request Timeout";
    case 409:
      return "Conflict";
    case 410:
      return "Gone";
    case 411:
      return "Length Required";
    case 412:
      return "Precondition Failed";
    case 413:
      return "Payload Too Large";
    case 414:
      return "URI Too Long";
    case 415:
      return "Unsupported Media Type";
    case 416:
      return "Range Not Satisfiable";
    case 417:
      return "Expectation Failed";
    case 418:
      return "I'm a teapot";
    case 421:
      return "Misdirected Request";
    case 422:
      return "Unprocessable Content";
    case 423:
      return "Locked";
    case 424:
      return "Failed Dependency";
    case 425:
      return "Too Early";
    case 426:
      return "Upgrade Required";
    case 428:
      return "Precondition Required";
    case 429:
      return "Too Many Requests";
    case 431:
      return "Request Header Fields Too Large";
    case 451:
      return "Unavailable For Legal Reasons";
    case 500:
      return "Internal Server Error";
    case 501:
      return "Not Implemented";
    case 502:
      return "Bad Gateway";
    case 503:
      return "Service Unavailable";
    case 504:
      return "Gateway Timeout";
    case 505:
      return "HTTP Version Not Supported";
    case 506:
      return "Variant Also Negotiates";
    case 507:
      return "Insufficient Storage";
    case 508:
      return "Loop Detected";
    case 510:
      return "Not Extended";
    case 511:
      return "Network Authentication Required";
    default:
      // Includes all 1xx/2xx/3xx codes and unassigned 4xx/5xx codes.
      return nullptr;
  }
}
/// Maps an HTTP status code to the canonical `absl::StatusCode` used
/// throughout tensorstore (e.g. 404 -> kNotFound, 503 -> kUnavailable).
absl::StatusCode HttpResponseCodeToStatusCode(const HttpResponse& response) {
  switch (response.status_code) {
    // Success responses (including 206 Partial Content).
    case 200:
    case 201:
    case 202:
    case 204:
    case 206:
      return absl::StatusCode::kOk;
    case 400:
    case 411:
      return absl::StatusCode::kInvalidArgument;
    case 401:
    case 403:
      return absl::StatusCode::kPermissionDenied;
    case 404:
    case 410:
      return absl::StatusCode::kNotFound;
    // Redirects and precondition-style failures.
    case 302:
    case 303:
    case 304:
    case 307:
    case 412:
    case 413:
      return absl::StatusCode::kFailedPrecondition;
    case 416:
      return absl::StatusCode::kOutOfRange;
    // Conditions generally worth retrying.
    case 308:
    case 408:
    case 409:
    case 429:
    case 500:
    case 502:
    case 503:
    case 504:
      return absl::StatusCode::kUnavailable;
  }
  // Any other 1xx/2xx code counts as success.
  if (response.status_code < 300) {
    return absl::StatusCode::kOk;
  }
  return absl::StatusCode::kUnknown;
}
/// Converts `response` to an `absl::Status`, attaching diagnostics as status
/// payloads: up to 256 bytes of the body ("http_response_body") and the
/// numeric status code ("http_response_code").
absl::Status HttpResponseCodeToStatus(const HttpResponse& response,
                                      SourceLocation loc) {
  auto code = HttpResponseCodeToStatusCode(response);
  if (code == absl::StatusCode::kOk) {
    return absl::OkStatus();
  }
  auto status_message = HttpResponseCodeToMessage(response);
  if (!status_message) status_message = "Unknown";
  absl::Status status(code, status_message);
  if (!response.payload.empty()) {
    // Truncate the body payload to at most 256 bytes for diagnostics.
    status.SetPayload(
        "http_response_body",
        response.payload.Subcord(
            0, response.payload.size() < 256 ? response.payload.size() : 256));
  }
  MaybeAddSourceLocation(status, loc);
  status.SetPayload("http_response_code",
                    absl::Cord(tensorstore::StrCat(response.status_code)));
  return status;
}
/// Parses the "content-range" header of an HTTP 206 (Partial Content)
/// response into a half-open byte range plus total size (-1 when the total
/// is "*"/unknown).  Fails with kFailedPrecondition when the header is
/// absent, malformed, or internally inconsistent.
Result<ParsedContentRange> ParseContentRangeHeader(
    const HttpResponse& response) {
  auto it = response.headers.find("content-range");
  if (it == response.headers.end()) {
    if (response.status_code != 206) {
      // Non-206 responses are not expected to carry Content-Range at all.
      return absl::FailedPreconditionError(
          tensorstore::StrCat("No Content-Range header expected with HTTP ",
                              response.status_code, " response"));
    }
    return absl::FailedPreconditionError(
        "Expected Content-Range header with HTTP 206 response");
  }
  // "bytes <a>-<b>/<total>" with inclusive bounds; total may be "*".
  static const RE2 kContentRangeRegex(R"(^bytes (\d+)-(\d+)/(?:(\d+)|\*))");
  int64_t a, b;
  std::optional<int64_t> total_size;
  // Reject inverted ranges, ends beyond the total, and an inclusive end at
  // INT64_MAX (whose exclusive form b + 1 would overflow below).
  if (!RE2::FullMatch(it->second, kContentRangeRegex, &a, &b, &total_size) ||
      a > b || (total_size && b >= *total_size) ||
      b == std::numeric_limits<int64_t>::max()) {
    return absl::FailedPreconditionError(tensorstore::StrCat(
        "Unexpected Content-Range header received: ", QuoteString(it->second)));
  }
  // Convert the inclusive upper bound to an exclusive one.
  return ParsedContentRange{a, b + 1, total_size.value_or(-1)};
}
}
} | #include "tensorstore/internal/http/http_response.h"
#include <set>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::internal_http::HttpResponse;
TEST(HttpResponseCodeToStatusTest, AllCodes) {
using ::tensorstore::internal_http::HttpResponseCodeToStatus;
absl::flat_hash_set<int> seen;
for (auto code : {200, 201, 204, 206}) {
seen.insert(code);
EXPECT_TRUE(HttpResponseCodeToStatus({code, {}, {}}).ok()) << code;
}
for (auto code : {400, 411}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {401, 403}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kPermissionDenied,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {404, 410}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kNotFound,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {302, 303, 304, 307, 412, 413}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kFailedPrecondition,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {416}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kOutOfRange,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {308, 408, 409, 429, 500, 502, 503, 504}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kUnavailable,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (int i = 300; i < 600; i++) {
if (seen.count(i) > 0) continue;
EXPECT_EQ(absl::StatusCode::kUnknown,
HttpResponseCodeToStatus({i, {}, {}}).code())
<< i;
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_response.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_response_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2bd5b8cb-2a69-4e2d-8ee5-48abd5be59bf | cpp | google/tensorstore | curl_wrappers | tensorstore/internal/http/curl_wrappers.cc | tensorstore/internal/http/curl_wrappers_test.cc | #include "tensorstore/internal/http/curl_wrappers.h"
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <curl/curl.h>
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
// Deleter implementations for the owning curl wrapper types; each releases
// the corresponding libcurl handle.
void CurlPtrCleanup::operator()(CURL* c) { curl_easy_cleanup(c); }
void CurlMultiCleanup::operator()(CURLM* m) { curl_multi_cleanup(m); }
void CurlSlistCleanup::operator()(curl_slist* s) { curl_slist_free_all(s); }
/// Returns the user-agent suffix ("tensorstore/0.1 " + libcurl version),
/// computed once and cached for the lifetime of the process.
std::string GetCurlUserAgentSuffix() {
  static std::string agent =
      tensorstore::StrCat("tensorstore/0.1 ", curl_version());
  return agent;
}
/// Converts a CURLcode from the easy interface into an `absl::Status`.
///
/// `detail` supplements the curl error string (a default detail is supplied
/// for proxy-resolution and timeout failures).  The numeric code is attached
/// as the "curl_code" status payload.
absl::Status CurlCodeToStatus(CURLcode code, std::string_view detail,
                              SourceLocation loc) {
  auto error_code = absl::StatusCode::kUnknown;
  switch (code) {
    case CURLE_OK:
      return absl::OkStatus();
    case CURLE_COULDNT_RESOLVE_PROXY:
      error_code = absl::StatusCode::kUnavailable;
      if (detail.empty()) detail = "Failed to resolve proxy";
      break;
    case CURLE_OPERATION_TIMEDOUT:
      error_code = absl::StatusCode::kDeadlineExceeded;
      if (detail.empty()) detail = "Timed out";
      break;
    // Transient network/protocol failures: callers may retry.
    case CURLE_COULDNT_CONNECT:
    case CURLE_COULDNT_RESOLVE_HOST:
    case CURLE_GOT_NOTHING:
    case CURLE_HTTP2:
    case CURLE_HTTP2_STREAM:
    case CURLE_PARTIAL_FILE:
    case CURLE_RECV_ERROR:
    case CURLE_SEND_ERROR:
    case CURLE_SSL_CONNECT_ERROR:
    case CURLE_UNSUPPORTED_PROTOCOL:
      error_code = absl::StatusCode::kUnavailable;
      break;
    case CURLE_URL_MALFORMAT:
      error_code = absl::StatusCode::kInvalidArgument;
      break;
    case CURLE_WRITE_ERROR:
      error_code = absl::StatusCode::kCancelled;
      break;
    case CURLE_ABORTED_BY_CALLBACK:
      error_code = absl::StatusCode::kAborted;
      break;
    case CURLE_REMOTE_ACCESS_DENIED:
      error_code = absl::StatusCode::kPermissionDenied;
      break;
    case CURLE_SEND_FAIL_REWIND:
    case CURLE_RANGE_ERROR:
      error_code = absl::StatusCode::kInternal;
      break;
    // Programming/configuration errors on our side.
    case CURLE_BAD_FUNCTION_ARGUMENT:
    case CURLE_OUT_OF_MEMORY:
    case CURLE_NOT_BUILT_IN:
    case CURLE_UNKNOWN_OPTION:
    case CURLE_BAD_DOWNLOAD_RESUME:
      error_code = absl::StatusCode::kInternal;
      break;
    default:
      break;
  }
  absl::Status status(
      error_code, tensorstore::StrCat("CURL error ", curl_easy_strerror(code),
                                      detail.empty() ? "" : ": ", detail));
  status.SetPayload("curl_code", absl::Cord(tensorstore::StrCat(code)));
  MaybeAddSourceLocation(status, loc);
  return status;
}
/// Converts a CURLM (multi-interface) result code into an `absl::Status`,
/// attaching the numeric code as the "curlm_code" payload.  All failures map
/// to kInternal.
absl::Status CurlMCodeToStatus(CURLMcode code, std::string_view detail,
                               SourceLocation loc) {
  if (code == CURLM_OK) return absl::OkStatus();
  const char* separator = detail.empty() ? "" : ": ";
  absl::Status status(
      absl::StatusCode::kInternal,
      tensorstore::StrCat("CURLM error ", curl_multi_strerror(code), separator,
                          detail));
  status.SetPayload("curlm_code", absl::Cord(tensorstore::StrCat(code)));
  MaybeAddSourceLocation(status, loc);
  return status;
}
}
} | #include "tensorstore/internal/http/curl_wrappers.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_http::CurlCodeToStatus;
using ::tensorstore::internal_http::CurlMCodeToStatus;
TEST(CurlFactoryTest, CurlCodeToStatus) {
struct {
CURLcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLE_OK, absl::StatusCode::kOk},
{CURLE_RECV_ERROR, absl::StatusCode::kUnavailable},
{CURLE_SEND_ERROR, absl::StatusCode::kUnavailable},
{CURLE_PARTIAL_FILE, absl::StatusCode::kUnavailable},
{CURLE_SSL_CONNECT_ERROR, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_HOST, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_PROXY, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_CONNECT, absl::StatusCode::kUnavailable},
{CURLE_REMOTE_ACCESS_DENIED, absl::StatusCode::kPermissionDenied},
{CURLE_OPERATION_TIMEDOUT, absl::StatusCode::kDeadlineExceeded},
{CURLE_ABORTED_BY_CALLBACK, absl::StatusCode::kAborted},
{CURLE_FAILED_INIT, absl::StatusCode::kUnknown},
{CURLE_GOT_NOTHING, absl::StatusCode::kUnavailable},
{CURLE_AGAIN, absl::StatusCode::kUnknown},
{CURLE_HTTP2, absl::StatusCode::kUnavailable},
{CURLE_BAD_DOWNLOAD_RESUME, absl::StatusCode::kInternal},
{CURLE_RANGE_ERROR, absl::StatusCode::kInternal},
{CURLE_UNSUPPORTED_PROTOCOL, absl::StatusCode::kUnavailable},
};
for (auto const& t : expected_codes) {
auto actual = CurlCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURL code=" << t.curl;
}
}
TEST(CurlFactoryTest, CurlMCodeToStatus) {
struct {
CURLMcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLM_OK, absl::StatusCode::kOk},
{CURLM_BAD_HANDLE, absl::StatusCode::kInternal},
{CURLM_BAD_EASY_HANDLE, absl::StatusCode::kInternal},
{CURLM_OUT_OF_MEMORY, absl::StatusCode::kInternal},
{CURLM_INTERNAL_ERROR, absl::StatusCode::kInternal},
};
for (auto const& t : expected_codes) {
auto actual = CurlMCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURLM code=" << t.curl;
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_wrappers.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_wrappers_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
28bbb614-9b57-4c27-aa28-be37452264de | cpp | google/tensorstore | curl_transport | tensorstore/internal/http/curl_transport.cc | tensorstore/internal/http/curl_transport_test.cc | #include "tensorstore/internal/http/curl_transport.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <curl/curl.h>
#include "tensorstore/internal/container/circular_queue.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_factory.h"
#include "tensorstore/internal/http/curl_handle.h"
#include "tensorstore/internal/http/curl_wrappers.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/thread/thread.h"
ABSL_FLAG(std::optional<uint32_t>, tensorstore_http_threads, std::nullopt,
"Threads to use for http requests. "
"Overrides TENSORSTORE_HTTP_THREADS.");
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_container::CircularQueue;
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_http {
namespace {
// Process-wide metrics tracking HTTP transfer activity, sizes, and latency.
auto& http_request_started = internal_metrics::Counter<int64_t>::New(
    "/tensorstore/http/request_started",
    MetricMetadata("HTTP requests started"));
auto& http_request_completed = internal_metrics::Counter<int64_t>::New(
    "/tensorstore/http/request_completed",
    MetricMetadata("HTTP requests completed"));
auto& http_request_bytes =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/request_bytes",
        MetricMetadata("HTTP request bytes transmitted",
                       internal_metrics::Units::kBytes));
auto& http_request_header_bytes =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/request_header_bytes",
        // NOTE(review): description duplicates http_request_bytes; likely
        // intended to read "HTTP request header bytes transmitted" — confirm.
        MetricMetadata("HTTP request bytes transmitted",
                       internal_metrics::Units::kBytes));
auto& http_response_codes = internal_metrics::Counter<int64_t, int>::New(
    "/tensorstore/http/response_codes", "code",
    MetricMetadata("HTTP response status code counts"));
auto& http_response_bytes =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/response_bytes",
        MetricMetadata("HTTP response bytes received",
                       internal_metrics::Units::kBytes));
auto& http_active = internal_metrics::Gauge<int64_t>::New(
    "/tensorstore/http/active",
    MetricMetadata("HTTP requests considered active"));
auto& http_total_time_ms =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/total_time_ms",
        MetricMetadata("HTTP total latency (ms)",
                       internal_metrics::Units::kMilliseconds));
auto& http_first_byte_latency_us =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/first_byte_latency_us",
        MetricMetadata("HTTP first byte received latency (us)",
                       internal_metrics::Units::kMicroseconds));
auto& http_poll_time_ns =
    internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
        "/tensorstore/http/http_poll_time_ns",
        MetricMetadata("HTTP time spent in curl_multi_poll (ns)",
                       internal_metrics::Units::kNanoseconds));
// Returns the number of HTTP transfer threads: the --tensorstore_http_threads
// flag or TENSORSTORE_HTTP_THREADS env var (default 4), clamped to >= 1.
uint32_t GetHttpThreads() {
  return std::max(1u, GetFlagOrEnvValue(FLAGS_tensorstore_http_threads,
                                        "TENSORSTORE_HTTP_THREADS")
                          .value_or(4u));
}
/// Per-request state bound to a single curl easy handle.
///
/// Owns the handle, request headers, and request payload for the lifetime of
/// one HTTP transfer, and forwards curl callbacks to the non-owned
/// `HttpResponseHandler`.
struct CurlRequestState {
  std::shared_ptr<CurlHandleFactory> factory_;
  CurlHandle handle_;
  CurlHeaders headers_;
  // Request body; consumed incrementally by CurlReadCallback.
  absl::Cord payload_;
  absl::Cord::CharIterator payload_it_;
  size_t payload_remaining_;
  HttpResponseHandler* response_handler_ = nullptr;  // not owned
  size_t response_payload_size_ = 0;
  // Whether OnStatus() has already been delivered to the handler.
  bool status_set = false;
  // Buffer curl fills with a human-readable error description.
  char error_buffer_[CURL_ERROR_SIZE];
  // Acquires a handle from `factory` and installs the write/header callbacks.
  CurlRequestState(std::shared_ptr<CurlHandleFactory> factory)
      : factory_(std::move(factory)), handle_(CurlHandle::Create(*factory_)) {
    error_buffer_[0] = 0;
    handle_.SetOption(CURLOPT_ERRORBUFFER, error_buffer_);
    // 512 KiB receive buffer.
    handle_.SetOption(CURLOPT_BUFFERSIZE, 512 * 1024);
    handle_.SetOption(CURLOPT_TCP_NODELAY, 1L);
    handle_.SetOption(CURLOPT_WRITEDATA, this);
    handle_.SetOption(CURLOPT_WRITEFUNCTION,
                      &CurlRequestState::CurlWriteCallback);
    handle_.SetOption(CURLOPT_HEADERDATA, this);
    handle_.SetOption(CURLOPT_HEADERFUNCTION,
                      &CurlRequestState::CurlHeaderCallback);
  }
  // Clears all callback options (the handle may be reused by the factory)
  // before returning it.
  ~CurlRequestState() {
    handle_.SetOption(CURLOPT_WRITEDATA, nullptr);
    handle_.SetOption(CURLOPT_WRITEFUNCTION, nullptr);
    handle_.SetOption(CURLOPT_READDATA, nullptr);
    handle_.SetOption(CURLOPT_READFUNCTION, nullptr);
    handle_.SetOption(CURLOPT_SEEKDATA, nullptr);
    handle_.SetOption(CURLOPT_SEEKFUNCTION, nullptr);
    handle_.SetOption(CURLOPT_HEADERDATA, nullptr);
    handle_.SetOption(CURLOPT_HEADERFUNCTION, nullptr);
    handle_.SetOption(CURLOPT_ERRORBUFFER, nullptr);
    CurlHandle::Cleanup(*factory_, std::move(handle_));
  }
  // Translates `request`/`options` into curl easy-handle options: URL,
  // user-agent, headers, timeouts, payload callbacks, HTTP method, and
  // HTTP version; records request metrics.
  void Prepare(const HttpRequest& request, IssueRequestOptions options) {
    handle_.SetOption(CURLOPT_URL, request.url.c_str());
    std::string user_agent = request.user_agent + GetCurlUserAgentSuffix();
    handle_.SetOption(CURLOPT_USERAGENT, user_agent.c_str());
    // Build the curl header list, tracking total header bytes for metrics.
    curl_slist* head = nullptr;
    size_t header_bytes_ = 0;
    for (const std::string& h : request.headers) {
      head = curl_slist_append(head, h.c_str());
      header_bytes_ += h.size();
    }
    headers_.reset(head);
    handle_.SetOption(CURLOPT_HTTPHEADER, headers_.get());
    if (request.accept_encoding) {
      // Empty string asks curl to advertise all encodings it supports.
      handle_.SetOption(CURLOPT_ACCEPT_ENCODING, "");
    }
    if (options.request_timeout > absl::ZeroDuration()) {
      auto ms = absl::ToInt64Milliseconds(options.request_timeout);
      handle_.SetOption(CURLOPT_TIMEOUT_MS, ms > 0 ? ms : 1);
    }
    if (options.connect_timeout > absl::ZeroDuration()) {
      auto ms = absl::ToInt64Milliseconds(options.connect_timeout);
      handle_.SetOption(CURLOPT_CONNECTTIMEOUT_MS, ms > 0 ? ms : 1);
    }
    payload_ = std::move(options.payload);
    payload_remaining_ = payload_.size();
    if (payload_remaining_ > 0) {
      // Upload body incrementally from the cord via read/seek callbacks.
      payload_it_ = payload_.char_begin();
      handle_.SetOption(CURLOPT_READDATA, this);
      handle_.SetOption(CURLOPT_READFUNCTION,
                        &CurlRequestState::CurlReadCallback);
      handle_.SetOption(CURLOPT_SEEKDATA, this);
      handle_.SetOption(CURLOPT_SEEKFUNCTION,
                        &CurlRequestState::CurlSeekCallback);
    }
    if (request.method == "GET") {
      handle_.SetOption(CURLOPT_PIPEWAIT, 1L);
      handle_.SetOption(CURLOPT_HTTPGET, 1L);
    } else if (request.method == "HEAD") {
      handle_.SetOption(CURLOPT_NOBODY, 1L);
    } else if (request.method == "PUT") {
      handle_.SetOption(CURLOPT_UPLOAD, 1L);
      handle_.SetOption(CURLOPT_PUT, 1L);
      handle_.SetOption(CURLOPT_INFILESIZE_LARGE, payload_remaining_);
    } else if (request.method == "POST") {
      handle_.SetOption(CURLOPT_POST, 1L);
      handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
    } else if (request.method == "PATCH") {
      handle_.SetOption(CURLOPT_UPLOAD, 1L);
      handle_.SetOption(CURLOPT_CUSTOMREQUEST, "PATCH");
      handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
    } else {
      // Other methods pass through verbatim.
      handle_.SetOption(CURLOPT_CUSTOMREQUEST, request.method.c_str());
    }
    switch (options.http_version) {
      case IssueRequestOptions::HttpVersion::kHttp1:
        handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
        break;
      case IssueRequestOptions::HttpVersion::kHttp2:
        handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
        break;
      case IssueRequestOptions::HttpVersion::kHttp2TLS:
        handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
        break;
      case IssueRequestOptions::HttpVersion::kHttp2PriorKnowledge:
        handle_.SetOption(CURLOPT_HTTP_VERSION,
                          CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE);
        break;
      default:
        // Leave curl's default version selection in place.
        break;
    }
    http_request_started.Increment();
    http_request_bytes.Observe(payload_remaining_);
    http_request_header_bytes.Observe(header_bytes_);
  }
  // Prevents the underlying connection from being reused after this request.
  void SetForbidReuse() {
    handle_.SetOption(CURLOPT_FORBID_REUSE, 1);
  }
  // Delivers OnStatus() exactly once, as soon as a final (>=200) response
  // code is available. Returns false while only provisional data is present.
  bool MaybeSetStatusAndProcess() {
    if (status_set) return true;
    auto status_code = handle_.GetResponseCode();
    if (status_code < 200) return false;
    response_handler_->OnStatus(status_code);
    status_set = true;
    return true;
  }
  // curl CURLOPT_HEADERFUNCTION: forwards each response header line.
  static size_t CurlHeaderCallback(void* contents, size_t size, size_t nmemb,
                                   void* userdata) {
    auto* self = static_cast<CurlRequestState*>(userdata);
    auto data =
        std::string_view(static_cast<char const*>(contents), size * nmemb);
    if (self->MaybeSetStatusAndProcess()) {
      self->response_handler_->OnResponseHeader(data);
    }
    return data.size();
  }
  // curl CURLOPT_WRITEFUNCTION: forwards response body chunks.
  static size_t CurlWriteCallback(void* contents, size_t size, size_t nmemb,
                                  void* userdata) {
    auto* self = static_cast<CurlRequestState*>(userdata);
    auto data =
        std::string_view(static_cast<char const*>(contents), size * nmemb);
    if (self->MaybeSetStatusAndProcess()) {
      self->response_payload_size_ += data.size();
      self->response_handler_->OnResponseBody(data);
    }
    return data.size();
  }
  // curl CURLOPT_READFUNCTION: copies the next slice of the payload cord.
  static size_t CurlReadCallback(void* contents, size_t size, size_t nmemb,
                                 void* userdata) {
    auto* self = static_cast<CurlRequestState*>(userdata);
    size_t n = std::min(size * nmemb, self->payload_remaining_);
    internal::CopyCordToSpan(self->payload_it_, {static_cast<char*>(contents),
                                                 static_cast<ptrdiff_t>(n)});
    self->payload_remaining_ -= n;
    return n;
  }
  // curl CURLOPT_SEEKFUNCTION: supports rewinding the payload (e.g. on
  // redirect/retry). Only absolute (SEEK_SET) seeks are supported.
  static int CurlSeekCallback(void* userdata, curl_off_t offset, int origin) {
    if (origin != SEEK_SET) {
      return CURL_SEEKFUNC_CANTSEEK;
    }
    auto* self = static_cast<CurlRequestState*>(userdata);
    if (offset < 0 || offset > self->payload_.size()) {
      return CURL_SEEKFUNC_FAIL;
    }
    self->payload_it_ = self->payload_.char_begin();
    absl::Cord::Advance(&self->payload_it_, static_cast<size_t>(offset));
    self->payload_remaining_ =
        self->payload_.size() - static_cast<size_t>(offset);
    return CURL_SEEKFUNC_OK;
  }
};
/// Multi-threaded curl transport: a fixed pool of worker threads, each
/// driving its own curl multi handle; new requests go to the least-loaded
/// thread.
class MultiTransportImpl {
 public:
  MultiTransportImpl(std::shared_ptr<CurlHandleFactory> factory,
                     size_t nthreads);
  ~MultiTransportImpl();
  /// Prepares `request` and hands it to a worker thread.
  void EnqueueRequest(const HttpRequest& request, IssueRequestOptions options,
                      HttpResponseHandler* response_handler);
  /// Records completion metrics and notifies the response handler.
  void FinishRequest(std::unique_ptr<CurlRequestState> state, CURLcode code);
 private:
  /// Per-worker-thread state.
  struct ThreadData {
    std::atomic<int64_t> count = 0;  // transfers assigned (queued + active)
    CurlMulti multi;
    absl::Mutex mutex;
    CircularQueue<std::unique_ptr<CurlRequestState>> pending{16};
    bool done = false;  // guarded by `mutex`
  };
  /// Worker-thread main loop.
  void Run(ThreadData& thread_data);
  /// Moves queued requests into the multi handle.
  void MaybeAddPendingTransfers(ThreadData& thread_data);
  /// Collects finished transfers from the multi handle.
  void RemoveCompletedTransfers(ThreadData& thread_data);
  std::shared_ptr<CurlHandleFactory> factory_;
  std::atomic<bool> done_{false};
  std::unique_ptr<ThreadData[]> thread_data_;
  std::vector<internal::Thread> threads_;
};
/// Spawns `nthreads` worker threads, each with its own curl multi handle.
MultiTransportImpl::MultiTransportImpl(
    std::shared_ptr<CurlHandleFactory> factory, size_t nthreads)
    : factory_(std::move(factory)) {
  assert(factory_);
  threads_.reserve(nthreads);
  thread_data_ = std::make_unique<ThreadData[]>(nthreads);
  for (size_t i = 0; i < nthreads; ++i) {
    thread_data_[i].multi = factory_->CreateMultiHandle();
    threads_.push_back(
        internal::Thread({"curl_multi_thread"},
                         [this, index = i] { Run(thread_data_[index]); }));
  }
}
/// Signals every worker thread to exit, joins them, then returns the multi
/// handles to the factory.
MultiTransportImpl::~MultiTransportImpl() {
  done_ = true;
  for (size_t i = 0; i < threads_.size(); ++i) {
    auto& thread_data = thread_data_[i];
    absl::MutexLock l(&thread_data.mutex);
    thread_data.done = true;
    // Wake the thread out of curl_multi_poll so it observes `done`.
    curl_multi_wakeup(thread_data.multi.get());
  }
  for (auto& thread : threads_) {
    thread.Join();
  }
  for (size_t i = 0; i < threads_.size(); ++i) {
    factory_->CleanupMultiHandle(std::move(thread_data_[i].multi));
  }
}
/// Prepares `request` and queues it on the worker thread with the fewest
/// assigned transfers. Fails immediately if the transport is shutting down.
void MultiTransportImpl::EnqueueRequest(const HttpRequest& request,
                                        IssueRequestOptions options,
                                        HttpResponseHandler* response_handler) {
  if (done_.load()) {
    response_handler->OnFailure(
        absl::InternalError("MultiTransportImpl is shutting down"));
    return;
  }
  auto state = std::make_unique<CurlRequestState>(factory_);
  state->response_handler_ = response_handler;
  state->Prepare(request, std::move(options));
  // Least-loaded thread selection; counts may change concurrently, so this
  // is a heuristic rather than an exact balance.
  size_t selected_index = 0;
  for (size_t i = 1; i < threads_.size(); ++i) {
    if (thread_data_[i].count < thread_data_[selected_index].count) {
      selected_index = i;
    }
  }
  auto& selected = thread_data_[selected_index];
  absl::MutexLock l(&selected.mutex);
  selected.pending.push_back(std::move(state));
  selected.count++;
  // Wake the worker out of curl_multi_poll to pick up the new request.
  curl_multi_wakeup(selected.multi.get());
}
/// Records completion metrics and dispatches the final handler callback:
/// OnFailure for curl-level errors, otherwise OnComplete.
void MultiTransportImpl::FinishRequest(std::unique_ptr<CurlRequestState> state,
                                       CURLcode code) {
  if (code == CURLE_HTTP2) {
    // An HTTP/2 stream error may indicate a bad connection; don't reuse it.
    ABSL_LOG(WARNING) << "CURLE_HTTP2 " << state->error_buffer_;
    state->SetForbidReuse();
  }
  http_request_completed.Increment();
  http_response_bytes.Observe(state->response_payload_size_);
  {
    // CURLINFO_*_TIME_T values are reported in microseconds.
    curl_off_t first_byte_us = 0;
    state->handle_.GetInfo(CURLINFO_STARTTRANSFER_TIME_T, &first_byte_us);
    http_first_byte_latency_us.Observe(first_byte_us);
  }
  {
    curl_off_t total_time_us = 0;
    state->handle_.GetInfo(CURLINFO_TOTAL_TIME_T, &total_time_us);
    http_total_time_ms.Observe(total_time_us / 1000);  // us -> ms
  }
  if (code != CURLE_OK) {
    state->response_handler_->OnFailure(
        CurlCodeToStatus(code, state->error_buffer_));
    return;
  }
  http_response_codes.Increment(state->handle_.GetResponseCode());
  assert(state->status_set);
  state->response_handler_->OnComplete();
}
/// Worker-thread main loop: admit pending transfers, poll for socket
/// activity, drive curl, and collect completions; exits once `done` is set
/// and no transfers remain.
void MultiTransportImpl::Run(ThreadData& thread_data) {
  for (;;) {
    MaybeAddPendingTransfers(thread_data);
    if (thread_data.count == 0) {
      // Idle: block until new work arrives or shutdown is requested.
      absl::MutexLock l(&thread_data.mutex);
      if (thread_data.done) break;
      thread_data.mutex.Await(absl::Condition(
          +[](ThreadData* td) { return !td->pending.empty() || td->done; },
          &thread_data));
      if (thread_data.done) break;
      continue;
    }
    // Effectively wait forever; curl_multi_wakeup interrupts the poll.
    const int timeout_ms = std::numeric_limits<int>::max();
    int numfds = 0;
    errno = 0;
    auto start_poll = absl::Now();
    CURLMcode mcode = curl_multi_poll(thread_data.multi.get(), nullptr, 0,
                                      timeout_ms, &numfds);
    if (mcode != CURLM_OK) {
      ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_poll");
    }
    http_poll_time_ns.Observe(
        absl::ToInt64Nanoseconds(absl::Now() - start_poll));
    {
      // Drive transfers until curl stops asking to be called again.
      int running_handles = 0;
      CURLMcode mcode;
      do {
        mcode = curl_multi_perform(thread_data.multi.get(), &running_handles);
        http_active.Set(running_handles);
      } while (mcode == CURLM_CALL_MULTI_PERFORM);
      if (mcode != CURLM_OK) {
        ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_perform");
      }
    }
    RemoveCompletedTransfers(thread_data);
  }
  assert(thread_data.count == 0);
}
/// Moves queued requests from `pending` into the curl multi handle.
///
/// On a successful `curl_multi_add_handle` the state pointer is released;
/// ownership is recovered from CURLOPT_PRIVATE in RemoveCompletedTransfers.
void MultiTransportImpl::MaybeAddPendingTransfers(ThreadData& thread_data) {
  absl::MutexLock l(&thread_data.mutex);
  while (!thread_data.pending.empty()) {
    std::unique_ptr<CurlRequestState> state =
        std::move(thread_data.pending.front());
    thread_data.pending.pop_front();
    assert(state != nullptr);
    // Stash the owning pointer on the handle so completion can recover it.
    state->handle_.SetOption(CURLOPT_PRIVATE, state.get());
    CURL* e = state->handle_.get();
    CURLMcode mcode = curl_multi_add_handle(thread_data.multi.get(), e);
    if (mcode == CURLM_OK) {
      state.release();  // ownership transferred to the multi handle
    } else {
      // This request will never run; undo the bookkeeping and report failure.
      thread_data.count--;
      state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
      state->response_handler_->OnFailure(
          CurlMCodeToStatus(mcode, "in curl_multi_add_handle"));
    }
  }  // Fix: removed stray `;` (empty statement) that followed this loop.
}
/// Drains curl's completion queue, reclaiming ownership of each finished
/// CurlRequestState from CURLOPT_PRIVATE and dispatching FinishRequest.
void MultiTransportImpl::RemoveCompletedTransfers(ThreadData& thread_data) {
  CURLMsg* m = nullptr;
  do {
    int messages_in_queue;
    m = curl_multi_info_read(thread_data.multi.get(), &messages_in_queue);
    if (m && m->msg == CURLMSG_DONE) {
      CURLcode result = m->data.result;
      CURL* e = m->easy_handle;
      curl_multi_remove_handle(thread_data.multi.get(), e);
      thread_data.count--;
      // Recover the owning pointer stashed by MaybeAddPendingTransfers.
      CurlRequestState* pvt = nullptr;
      curl_easy_getinfo(e, CURLINFO_PRIVATE, &pvt);
      assert(pvt);
      std::unique_ptr<CurlRequestState> state(pvt);
      state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
      FinishRequest(std::move(state), result);
    }
  } while (m != nullptr);
}
}
/// Private implementation; inherits all behavior from MultiTransportImpl.
class CurlTransport::Impl : public MultiTransportImpl {
 public:
  using MultiTransportImpl::MultiTransportImpl;
};
/// Constructs the transport with a worker pool sized by GetHttpThreads().
CurlTransport::CurlTransport(std::shared_ptr<CurlHandleFactory> factory)
    : impl_(std::make_unique<Impl>(std::move(factory),
                                   /*nthreads=*/GetHttpThreads())) {}
CurlTransport::~CurlTransport() = default;
/// Forwards the request to the multi-threaded implementation.
void CurlTransport::IssueRequestWithHandler(
    const HttpRequest& request, IssueRequestOptions options,
    HttpResponseHandler* response_handler) {
  assert(impl_);
  impl_->EnqueueRequest(request, std::move(options), response_handler);
}
namespace {
/// Holder for the process-wide default transport; all access is serialized
/// by `global_mu`.
struct GlobalTransport {
  std::shared_ptr<HttpTransport> transport_;
  // Lazily creates the default curl-based transport on first use (or after
  // Set(nullptr)).
  std::shared_ptr<HttpTransport> Get() {
    if (!transport_) {
      transport_ =
          std::make_shared<CurlTransport>(GetDefaultCurlHandleFactory());
    }
    return transport_;
  }
  void Set(std::shared_ptr<HttpTransport> transport) {
    transport_ = std::move(transport);
  }
};
ABSL_CONST_INIT absl::Mutex global_mu(absl::kConstInit);
/// Returns the process-wide GlobalTransport singleton (heap-allocated and
/// never destroyed, sidestepping shutdown-order issues).
static GlobalTransport& GetGlobalTransport() {
  static GlobalTransport* instance = new GlobalTransport();
  return *instance;
}
}
/// Returns the process-wide default transport, creating the curl-based
/// default on first use.
std::shared_ptr<HttpTransport> GetDefaultHttpTransport() {
  absl::MutexLock l(&global_mu);
  return GetGlobalTransport().Get();
}
/// Replaces the process-wide default transport; passing nullptr restores the
/// lazily-created curl default.
void SetDefaultHttpTransport(std::shared_ptr<HttpTransport> t) {
  absl::MutexLock l(&global_mu);
  // Fix: dropped the meaningless `return` of a void expression.
  GetGlobalTransport().Set(std::move(t));
}
}
} | #ifdef _WIN32
#undef UNICODE
#define WIN32_LEAN_AND_MEAN
#endif
#include "tensorstore/internal/http/curl_transport.h"
#include <optional>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/thread/thread.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::AcceptNonBlocking;
using ::tensorstore::transport_test_utils::AssertSend;
using ::tensorstore::transport_test_utils::CloseSocket;
using ::tensorstore::transport_test_utils::CreateBoundSocket;
using ::tensorstore::transport_test_utils::FormatSocketAddress;
using ::tensorstore::transport_test_utils::ReceiveAvailable;
using ::tensorstore::transport_test_utils::socket_t;
using ::testing::HasSubstr;
namespace {
class CurlTransportTest : public ::testing::Test {
public:
};
// Exercises a POST request against a local single-shot HTTP/1.1 server and
// verifies the request line, headers, and body received by the server.
TEST_F(CurlTransportTest, Http1) {
  auto transport = ::tensorstore::internal_http::GetDefaultHttpTransport();
  // Bind a local listening socket to act as the server.
  auto socket = CreateBoundSocket();
  ABSL_CHECK(socket.has_value());
  auto hostport = FormatSocketAddress(*socket);
  ABSL_CHECK(!hostport.empty());
  static constexpr char kResponse[] =
      "HTTP/1.1 200 OK\r\n"
      "Content-Type: text/html\r\n"
      "\r\n"
      "<html>\n<body>\n<h1>Hello, World!</h1>\n</body>\n</html>\n";
  // Single-request server thread: accept, capture the request, reply, close.
  std::string initial_request;
  tensorstore::internal::Thread serve_thread({"serve_thread"}, [&] {
    auto client_fd = AcceptNonBlocking(*socket);
    ABSL_CHECK(client_fd.has_value());
    initial_request = ReceiveAvailable(*client_fd);
    AssertSend(*client_fd, kResponse);
    CloseSocket(*client_fd);
  });
  auto response = transport->IssueRequest(
      // Fix: URL construction was truncated; target the local server.
      HttpRequestBuilder("POST", absl::StrCat("http://", hostport, "/"))
          .AddHeader("X-foo: bar")
          .AddQueryParameter("name", "dragon")
          .AddQueryParameter("age", "1234")
          .EnableAcceptEncoding()
          .BuildRequest(),
      IssueRequestOptions(absl::Cord("Hello")));
  ABSL_LOG(INFO) << response.status();
  ABSL_LOG(INFO) << "Wait on server";
  serve_thread.Join();
  CloseSocket(*socket);
  // Fix: the assertions below previously referenced an undeclared `request`
  // variable; the captured request text is `initial_request`.
  EXPECT_THAT(initial_request, HasSubstr("POST /?name=dragon&age=1234"));
  EXPECT_THAT(initial_request,
              HasSubstr(absl::StrCat("Host: ", hostport, "\r\n")));
  // Fix: "Accept: **" was a garbled "Accept: */*".
  EXPECT_THAT(initial_request, HasSubstr("Accept: */*\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("X-foo: bar\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("Content-Length: 5"));
  EXPECT_THAT(
      initial_request,
      HasSubstr("Content-Type: application/x-www-form-urlencoded\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("Hello"));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3ba84d55-f639-42ee-a830-911c1b3bb133 | cpp | google/tensorstore | zlib | tensorstore/internal/compression/zlib.cc | tensorstore/internal/compression/zlib_test.cc | #include "tensorstore/internal/compression/zlib.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/internal/compression/cord_stream_manager.h"
#include <zlib.h>
namespace tensorstore {
namespace zlib {
namespace {
/// Adapter mapping zlib's inflate (decompression) API onto the generic
/// interface consumed by ProcessZlib.
struct InflateOp {
  static int Init(z_stream* s, [[maybe_unused]] int level, int header_option) {
    // windowBits = 15 (maximum window); adding 16 selects a gzip header.
    return inflateInit2(s, 15
                               + header_option);
  }
  static int Process(z_stream* s, int flags) { return inflate(s, flags); }
  static int Destroy(z_stream* s) { return inflateEnd(s); }
  // Decompression can legitimately fail on corrupt input.
  static constexpr bool kDataErrorPossible = true;
};
/// Adapter mapping zlib's deflate (compression) API onto the generic
/// interface consumed by ProcessZlib.
struct DeflateOp {
  static int Init(z_stream* s, int level, int header_option) {
    return deflateInit2(s, level, Z_DEFLATED,
                        // windowBits = 15; adding 16 selects a gzip header.
                        15
                            + header_option,
                        8 /* memLevel (zlib default) */,
                        Z_DEFAULT_STRATEGY);
  }
  static int Process(z_stream* s, int flags) { return deflate(s, flags); }
  static int Destroy(z_stream* s) { return deflateEnd(s); }
  // Compressing in-memory data cannot produce a data error.
  static constexpr bool kDataErrorPossible = false;
};
/// Runs a complete inflate/deflate pass over `input`, appending to `*output`.
///
/// `Op` is InflateOp or DeflateOp; `level` is ignored for inflate.
/// CHECK-fails on errors indicating programmer error; returns
/// InvalidArgument only for corrupt compressed input during decompression.
template <typename Op>
absl::Status ProcessZlib(const absl::Cord& input, absl::Cord* output, int level,
                         bool use_gzip_header) {
  z_stream s = {};
  // Feeds cord chunks in and collects output in 16 KiB buffers.
  internal::CordStreamManager<z_stream, 16 * 1024>
      stream_manager(s, input, output);
  // Adding 16 to windowBits selects a gzip wrapper instead of zlib.
  const int header_option = use_gzip_header ? 16
                                            : 0;
  int err = Op::Init(&s, level, header_option);
  if (err != Z_OK) {
    ABSL_CHECK(false);
  }
  // Guarantees inflateEnd/deflateEnd runs on every exit path.
  struct StreamDestroyer {
    z_stream* s;
    ~StreamDestroyer() { Op::Destroy(s); }
  } stream_destroyer{&s};
  while (true) {
    const bool input_complete = stream_manager.FeedInputAndOutputBuffers();
    err = Op::Process(&s, input_complete ? Z_FINISH : Z_NO_FLUSH);
    const bool made_progress = stream_manager.HandleOutput();
    if (err == Z_OK) continue;
    // Z_BUF_ERROR is retryable as long as output space was consumed.
    if (err == Z_BUF_ERROR && made_progress) continue;
    break;
  }
  switch (err) {
    case Z_STREAM_END:
      // Trailing garbage after the compressed stream is treated as an error.
      if (!stream_manager.has_input_remaining()) {
        return absl::OkStatus();
      }
      [[fallthrough]];
    case Z_NEED_DICT:
    case Z_DATA_ERROR:
    case Z_BUF_ERROR:
      if (!Op::kDataErrorPossible) {
        ABSL_CHECK(false);
      }
      return absl::InvalidArgumentError("Error decoding zlib-compressed data");
    default:
      ABSL_CHECK(false);
  }
  ABSL_UNREACHABLE();
}
}
/// Compresses `input`, appending the result to `*output`.
///
/// Compression of in-memory data cannot fail, so the status is discarded.
void Encode(const absl::Cord& input, absl::Cord* output,
            const Options& options) {
  auto status = ProcessZlib<DeflateOp>(input, output, options.level,
                                       options.use_gzip_header);
  status.IgnoreError();
}
/// Decompresses `input`, appending the result to `*output`.
///
/// Returns InvalidArgument if the compressed data is corrupt.
absl::Status Decode(const absl::Cord& input, absl::Cord* output,
                    bool use_gzip_header) {
  // The compression level is meaningless during decompression.
  constexpr int kUnusedLevel = 0;
  return ProcessZlib<InflateOp>(input, output, kUnusedLevel, use_gzip_header);
}
}
} | #include "tensorstore/internal/compression/zlib.h"
#include <cstddef>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace zlib = tensorstore::zlib;
class ZlibCompressorTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(ZlibCompressorTestCases, ZlibCompressorTest,
::testing::Values(false, true));
// Verifies a small encode/decode round-trip, and that both Encode and
// Decode append to (rather than replace) the output cord.
TEST_P(ZlibCompressorTest, SmallRoundtrip) {
  const bool use_gzip_header = GetParam();
  zlib::Options options{6, use_gzip_header};
  const absl::Cord input("The quick brown fox jumped over the lazy dog.");
  absl::Cord encode_result("abc"), decode_result("def");
  zlib::Encode(input, &encode_result, options);
  ASSERT_GE(encode_result.size(), 3);
  EXPECT_EQ("abc", encode_result.Subcord(0, 3));
  TENSORSTORE_ASSERT_OK(
      zlib::Decode(encode_result.Subcord(3, encode_result.size() - 3),
                   &decode_result, options.use_gzip_header));
  EXPECT_EQ("def" + std::string(input), decode_result);
}
// Same round-trip as above, but with fragmented cords: the input is split
// into chunks and the encoded data into 1-byte fragments, exercising the
// streaming paths.
TEST_P(ZlibCompressorTest, SmallRoundtripFragmented) {
  const bool use_gzip_header = GetParam();
  zlib::Options options{6, use_gzip_header};
  const absl::Cord input = absl::MakeFragmentedCord(
      {"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
  absl::Cord encode_result("abc"), decode_result("def");
  zlib::Encode(input, &encode_result, options);
  ASSERT_GE(encode_result.size(), 3);
  EXPECT_EQ("abc", encode_result.Subcord(0, 3));
  // Re-fragment the encoded output one byte at a time.
  std::vector<std::string> encode_result_fragments;
  for (size_t i = 3; i < encode_result.size(); ++i) {
    encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
  }
  TENSORSTORE_ASSERT_OK(
      zlib::Decode(absl::MakeFragmentedCord(encode_result_fragments),
                   &decode_result, options.use_gzip_header));
  EXPECT_EQ("def" + std::string(input), decode_result);
}
// Round-trips a 100000-byte patterned input that spans multiple internal
// 16 KiB stream buffers.
TEST_P(ZlibCompressorTest, LargeRoundtrip) {
  const bool use_gzip_header = GetParam();
  std::string input(100000, '\0');
  unsigned char x = 0;
  for (auto& v : input) {
    v = x;
    x += 7;
  }
  zlib::Options options{6, use_gzip_header};
  absl::Cord encode_result, decode_result;
  zlib::Encode(absl::Cord(input), &encode_result, options);
  ASSERT_EQ(absl::OkStatus(), zlib::Decode(encode_result, &decode_result,
                                           options.use_gzip_header));
  EXPECT_EQ(input, decode_result);
}
// Verifies that the compression level is honored (level 0 vs 9 yields
// different encodings) and that the non-default encoding still decodes.
TEST_P(ZlibCompressorTest, NonDefaultLevel) {
  const bool use_gzip_header = GetParam();
  zlib::Options options1{
      0, use_gzip_header};
  zlib::Options options2{9, use_gzip_header};
  const absl::Cord input("The quick brown fox jumped over the lazy dog.");
  absl::Cord encode_result1, encode_result2;
  zlib::Encode(input, &encode_result1, options1);
  zlib::Encode(input, &encode_result2, options2);
  EXPECT_NE(encode_result1, encode_result2);
  absl::Cord decode_result;
  TENSORSTORE_ASSERT_OK(
      zlib::Decode(encode_result2, &decode_result, options2.use_gzip_header));
  EXPECT_EQ(input, decode_result);
}
// Verifies that Decode rejects corrupt input: both a clobbered leading byte
// and a truncated stream must yield InvalidArgument.
TEST_P(ZlibCompressorTest, DecodeCorruptData) {
  const bool use_gzip_header = GetParam();
  zlib::Options options{6, use_gzip_header};
  const absl::Cord input("The quick brown fox jumped over the lazy dog.");
  // Corrupt the first byte of the encoded stream.
  {
    absl::Cord encode_result, decode_result;
    zlib::Encode(input, &encode_result, options);
    ASSERT_GE(encode_result.size(), 1);
    std::string corrupted(encode_result);
    corrupted[0] = 0;
    EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
                             options.use_gzip_header),
                MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
  // Truncate the encoded stream by one byte.
  {
    absl::Cord encode_result, decode_result;
    zlib::Encode(input, &encode_result, options);
    ASSERT_GE(encode_result.size(), 1);
    std::string corrupted(encode_result);
    corrupted.resize(corrupted.size() - 1);
    EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
                             options.use_gzip_header),
                MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zlib.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zlib_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
75a7738d-254a-4171-9083-e75edd0bb8bc | cpp | google/tensorstore | neuroglancer_compressed_segmentation | tensorstore/internal/compression/neuroglancer_compressed_segmentation.cc | tensorstore/internal/compression/neuroglancer_compressed_segmentation_test.cc | #include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/container/flat_hash_map.h"
namespace tensorstore {
namespace neuroglancer_compressed_segmentation {
constexpr size_t kBlockHeaderSize = 2;
/// Writes the 2-word (8-byte) little-endian block header: word 0 holds the
/// 24-bit table offset with the encoding bit count in the high byte; word 1
/// holds the encoded-value base offset.
void WriteBlockHeader(size_t encoded_value_base_offset,
                      size_t table_base_offset, size_t encoding_bits,
                      void* output) {
  absl::little_endian::Store32(output,
                               table_base_offset | (encoding_bits << 24));
  absl::little_endian::Store32(static_cast<char*>(output) + 4,
                               encoded_value_base_offset);
}
/// Encodes a single block of labels.
///
/// `input` points at the first label of the block; `input_shape` is the
/// extent actually present (possibly clipped at the volume edge), while
/// `block_shape` is the nominal block extent used for bit addressing.
/// Appends the packed index values (and, unless cached, the value table) to
/// `*output`; returns bits-per-value and the table offset (in 32-bit words
/// relative to `base_offset`) via the output parameters.
template <typename Label>
void EncodeBlock(const Label* input, const ptrdiff_t input_shape[3],
                 const ptrdiff_t input_byte_strides[3],
                 const ptrdiff_t block_shape[3], size_t base_offset,
                 size_t* encoded_bits_output, size_t* table_offset_output,
                 EncodedValueCache<Label>* cache, std::string* output) {
  if (input_shape[0] == 0 && input_shape[1] == 0 && input_shape[2] == 0) {
    *encoded_bits_output = 0;
    *table_offset_output = 0;
    return;
  }
  constexpr size_t num_32bit_words_per_label = sizeof(Label) / 4;
  // Maps each distinct label to its index in the sorted value table.
  absl::flat_hash_map<Label, uint32_t> seen_values;
  std::vector<Label> seen_values_inv;
  // Invokes func(z, y, x, label) for every element of the block.
  const auto ForEachElement = [&](auto func) {
    auto* input_z = reinterpret_cast<const char*>(input);
    for (ptrdiff_t z = 0; z < input_shape[0]; ++z) {
      auto* input_y = input_z;
      for (ptrdiff_t y = 0; y < input_shape[1]; ++y) {
        auto* input_x = input_y;
        for (ptrdiff_t x = 0; x < input_shape[2]; ++x) {
          func(z, y, x, *reinterpret_cast<const Label*>(input_x));
          input_x += input_byte_strides[2];
        }
        input_y += input_byte_strides[1];
      }
      input_z += input_byte_strides[0];
    }
  };
  // Initialized to differ from the first label so the first element always
  // takes the `value != previous_value` path.
  Label previous_value = input[0] + 1;
  ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
    if (value != previous_value) {
      previous_value = value;
      if (seen_values.emplace(value, 0).second) {
        seen_values_inv.push_back(value);
      }
    }
  });
  // The value table is stored sorted; re-derive each label's table index.
  std::sort(seen_values_inv.begin(), seen_values_inv.end());
  for (size_t i = 0; i < seen_values_inv.size(); ++i) {
    seen_values[seen_values_inv[i]] = static_cast<uint32_t>(i);
  }
  // Bits per encoded value: 0 (single distinct value) or a power of two.
  size_t encoded_bits = 0;
  if (seen_values.size() != 1) {
    encoded_bits = 1;
    while ((size_t(1) << encoded_bits) < seen_values.size()) {
      encoded_bits *= 2;
    }
  }
  *encoded_bits_output = encoded_bits;
  // Number of 32-bit words required for the packed index data.
  const size_t encoded_size_32bits =
      (encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] + 31) /
      32;
  const size_t encoded_value_base_offset = output->size();
  assert((encoded_value_base_offset - base_offset) % 4 == 0);
  size_t elements_to_write = encoded_size_32bits;
  bool write_table;
  {
    // Reuse a previously-written table if this exact value set was cached.
    auto it = cache->find(seen_values_inv);
    if (it == cache->end()) {
      write_table = true;
      elements_to_write += seen_values.size() * num_32bit_words_per_label;
      *table_offset_output =
          (encoded_value_base_offset - base_offset) / 4 + encoded_size_32bits;
    } else {
      write_table = false;
      *table_offset_output = it->second;
    }
  }
  output->resize(encoded_value_base_offset + elements_to_write * 4);
  char* output_ptr = output->data() + encoded_value_base_offset;
  // Pack each element's table index into an `encoded_bits`-wide slot.
  ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
    uint32_t index = seen_values.at(value);
    size_t output_offset = x + block_shape[2] * (y + block_shape[1] * z);
    void* cur_ptr = output_ptr + output_offset * encoded_bits / 32 * 4;
    absl::little_endian::Store32(
        cur_ptr, absl::little_endian::Load32(cur_ptr) |
                     (index << (output_offset * encoded_bits % 32)));
  });
  if (write_table) {
    // Append the sorted label table, little-endian, word by word.
    output_ptr =
        output->data() + encoded_value_base_offset + encoded_size_32bits * 4;
    for (auto value : seen_values_inv) {
      for (size_t word_i = 0; word_i < num_32bit_words_per_label; ++word_i) {
        absl::little_endian::Store32(
            output_ptr + word_i * 4,
            static_cast<uint32_t>(value >> (32 * word_i)));
      }
      output_ptr += num_32bit_words_per_label * 4;
    }
    cache->emplace(seen_values_inv,
                   static_cast<uint32_t>(*table_offset_output));
  }
}
/// Encodes one channel: a block-index header (kBlockHeaderSize 32-bit words
/// per grid block) followed by the per-block encoded data, appended to
/// `*output`. Header offsets are in 32-bit words relative to `base_offset`.
template <class Label>
void EncodeChannel(const Label* input, const ptrdiff_t input_shape[3],
                   const ptrdiff_t input_byte_strides[3],
                   const ptrdiff_t block_shape[3], std::string* output) {
  EncodedValueCache<Label> cache;
  const size_t base_offset = output->size();
  ptrdiff_t grid_shape[3];
  size_t block_index_size = kBlockHeaderSize;
  for (size_t i = 0; i < 3; ++i) {
    // Ceiling division: number of blocks along each dimension.
    grid_shape[i] = (input_shape[i] + block_shape[i] - 1) / block_shape[i];
    block_index_size *= grid_shape[i];
  }
  // Reserve the block index up front; headers are filled as blocks encode.
  output->resize(base_offset + block_index_size * 4);
  ptrdiff_t block[3];
  for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
    for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
      for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
        const size_t block_offset =
            block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
        // Clip the block extent at the volume boundary.
        ptrdiff_t input_block_shape[3];
        ptrdiff_t input_offset = 0;
        for (size_t i = 0; i < 3; ++i) {
          auto pos = block[i] * block_shape[i];
          input_block_shape[i] = std::min(block_shape[i], input_shape[i] - pos);
          input_offset += pos * input_byte_strides[i];
        }
        const size_t encoded_value_base_offset =
            (output->size() - base_offset) / 4;
        size_t encoded_bits, table_offset;
        EncodeBlock(reinterpret_cast<const Label*>(
                        reinterpret_cast<const char*>(input) + input_offset),
                    input_block_shape, input_byte_strides, block_shape,
                    base_offset, &encoded_bits, &table_offset, &cache, output);
        WriteBlockHeader(
            encoded_value_base_offset, table_offset, encoded_bits,
            output->data() + base_offset + block_offset * kBlockHeaderSize * 4);
      }
    }
  }
}
/// Encodes a multi-channel volume: a table of per-channel offsets (32-bit
/// words, relative to the start of the encoded data) followed by each
/// encoded channel. `input_shape[0]` is the channel count.
template <class Label>
void EncodeChannels(const Label* input, const ptrdiff_t input_shape[3 + 1],
                    const ptrdiff_t input_byte_strides[3 + 1],
                    const ptrdiff_t block_shape[3], std::string* output) {
  const size_t base_offset = output->size();
  output->resize(base_offset + input_shape[0] * 4);
  for (ptrdiff_t channel_i = 0; channel_i < input_shape[0]; ++channel_i) {
    // Record where this channel's data begins, then encode it.
    absl::little_endian::Store32(output->data() + base_offset + channel_i * 4,
                                 (output->size() - base_offset) / 4);
    EncodeChannel(
        reinterpret_cast<const Label*>(reinterpret_cast<const char*>(input) +
                                       input_byte_strides[0] * channel_i),
        input_shape + 1, input_byte_strides + 1, block_shape, output);
  }
}
/// Parses the 8-byte block header written by WriteBlockHeader.
void ReadBlockHeader(const void* header, size_t* encoded_value_base_offset,
                     size_t* table_base_offset, size_t* encoding_bits) {
  auto h = absl::little_endian::Load64(header);
  *table_base_offset = h & 0xffffff;
  *encoding_bits = (h >> 24) & 0xff;
  *encoded_value_base_offset = (h >> 32) & 0xffffff;
}
/// Decodes a single block into `output`.
///
/// Returns `false` if the encoded data is invalid (empty table with
/// `encoded_bits == 0`, or a packed index past the end of the table).
template <typename Label>
bool DecodeBlock(size_t encoded_bits, const char* encoded_input,
                 const char* table_input, size_t table_size,
                 const ptrdiff_t block_shape[3],
                 const ptrdiff_t output_shape[3],
                 const ptrdiff_t output_byte_strides[3], Label* output) {
  // Invokes callback(label_ref, z, y, x) for each output element, stopping
  // early (and returning false) if the callback reports failure.
  const auto for_each_position = [&](auto callback) {
    auto* output_z = reinterpret_cast<char*>(output);
    for (ptrdiff_t z = 0; z < output_shape[0]; ++z) {
      auto* output_y = output_z;
      for (ptrdiff_t y = 0; y < output_shape[1]; ++y) {
        auto* output_x = output_y;
        for (ptrdiff_t x = 0; x < output_shape[2]; ++x) {
          auto& label = *reinterpret_cast<Label*>(output_x);
          if (!callback(label, z, y, x)) return false;
          output_x += output_byte_strides[2];
        }
        output_y += output_byte_strides[1];
      }
      output_z += output_byte_strides[0];
    }
    return true;
  };
  // Reads the index-th label from the little-endian value table.
  const auto read_label = [&](size_t index) -> Label {
    if constexpr (sizeof(Label) == 4) {
      return absl::little_endian::Load32(table_input + index * sizeof(Label));
    } else {
      return absl::little_endian::Load64(table_input + index * sizeof(Label));
    }
  };
  if (encoded_bits == 0) {
    // The entire block is a single label.
    if (table_size == 0) return false;
    const Label label = read_label(0);
    return for_each_position(
        [&](Label& output_label, ptrdiff_t z, ptrdiff_t y, ptrdiff_t x) {
          output_label = label;
          return true;
        });
  }
  const uint32_t encoded_value_mask = (1U << encoded_bits) - 1;
  // Fix: an unreachable `return true;` that followed this return statement
  // has been removed.
  return for_each_position([&](Label& output_label, ptrdiff_t z, ptrdiff_t y,
                               ptrdiff_t x) {
    size_t encoded_offset = x + block_shape[2] * (y + block_shape[1] * z);
    auto index = absl::little_endian::Load32(
                     encoded_input + encoded_offset * encoded_bits / 32 * 4) >>
                     (encoded_offset * encoded_bits % 32) &
                 encoded_value_mask;
    if (index >= table_size) return false;
    output_label = read_label(index);
    return true;
  });
}
/// Decodes one channel previously written by EncodeChannel.
///
/// Returns `false` if the input is malformed (bad sizes, out-of-range
/// offsets, or an invalid bit width in any block header).
template <typename Label>
bool DecodeChannel(std::string_view input, const ptrdiff_t block_shape[3],
                   const ptrdiff_t output_shape[3],
                   const ptrdiff_t output_byte_strides[3], Label* output) {
  // All offsets are in 32-bit words; the input must be word-aligned.
  if ((input.size() % 4) != 0) return false;
  ptrdiff_t grid_shape[3];
  size_t block_index_size = kBlockHeaderSize;
  for (size_t i = 0; i < 3; ++i) {
    grid_shape[i] = (output_shape[i] + block_shape[i] - 1) / block_shape[i];
    block_index_size *= grid_shape[i];
  }
  // Input must at least contain the full block index.
  if (input.size() / 4 < block_index_size) {
    return false;
  }
  ptrdiff_t block[3];
  for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
    for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
      for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
        const size_t block_offset =
            block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
        // Clip the block extent at the volume boundary.
        ptrdiff_t output_block_shape[3];
        ptrdiff_t output_offset = 0;
        for (size_t i = 0; i < 3; ++i) {
          auto pos = block[i] * block_shape[i];
          output_block_shape[i] =
              std::min(block_shape[i], output_shape[i] - pos);
          output_offset += pos * output_byte_strides[i];
        }
        size_t encoded_value_base_offset;
        size_t encoded_bits, table_offset;
        ReadBlockHeader(input.data() + block_offset * kBlockHeaderSize * 4,
                        &encoded_value_base_offset, &table_offset,
                        &encoded_bits);
        // Bit width must be 0 or a power of two no greater than 32.
        if (encoded_bits > 32 || (encoded_bits & (encoded_bits - 1)) != 0) {
          return false;
        }
        if (encoded_value_base_offset > input.size() / 4 ||
            table_offset > input.size() / 4) {
          return false;
        }
        const size_t encoded_size_32bits =
            (encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] +
             31) /
            32;
        // The packed index data must lie entirely within the input.
        if ((encoded_value_base_offset + encoded_size_32bits) * 4 >
            input.size()) {
          return false;
        }
        auto* block_output = reinterpret_cast<Label*>(
            reinterpret_cast<char*>(output) + output_offset);
        const char* encoded_input =
            input.data() + encoded_value_base_offset * 4;
        const char* table_input = input.data() + table_offset * 4;
        // Upper bound on table entries; DecodeBlock validates each index.
        const size_t table_size =
            (input.size() - table_offset * 4) / sizeof(Label);
        if (!DecodeBlock(encoded_bits, encoded_input, table_input, table_size,
                         block_shape, output_block_shape, output_byte_strides,
                         block_output)) {
          return false;
        }
      }
    }
  }
  return true;
}
// Decodes a multi-channel compressed segmentation.
//
// `input` begins with a table of `output_shape[0]` little-endian 32-bit
// words, one per channel, each giving the word offset of that channel's
// encoded stream within `input`.  Each channel is decoded independently via
// `DecodeChannel` into the corresponding slice of `output`.
//
// Returns `false` if `input` is malformed (not word-aligned, missing header
// words, or any per-channel offset/stream is invalid).
template <typename Label>
bool DecodeChannels(std::string_view input, const ptrdiff_t block_shape[3],
                    const ptrdiff_t output_shape[3 + 1],
                    const ptrdiff_t output_byte_strides[3 + 1], Label* output) {
  // The encoded stream is always a whole number of 32-bit words.
  if ((input.size() % 4) != 0) return false;
  const size_t num_words = input.size() / 4;
  // One offset word per channel must be present.
  if (num_words < static_cast<size_t>(output_shape[0])) return false;
  auto* output_bytes = reinterpret_cast<char*>(output);
  for (ptrdiff_t channel = 0; channel < output_shape[0]; ++channel) {
    // Word offset of this channel's encoded data, from the header table.
    const size_t channel_offset =
        absl::little_endian::Load32(input.data() + channel * 4);
    if (channel_offset > num_words) return false;
    auto* channel_output = reinterpret_cast<Label*>(
        output_bytes + output_byte_strides[0] * channel);
    const bool ok =
        DecodeChannel(input.substr(channel_offset * 4), block_shape,
                      output_shape + 1, output_byte_strides + 1,
                      channel_output);
    if (!ok) return false;
  }
  return true;
}
// Explicitly instantiates the encode/decode entry points for a label type.
//
// BUG FIX: the final line of the macro previously ended with a trailing
// backslash, which made the following `DO_INSTANTIATE(uint32_t)` line part
// of the macro definition itself.  As a result the uint32_t instantiations
// were never emitted, and the uint64_t expansion contained a
// non-re-expandable `DO_INSTANTIATE(uint32_t)` token sequence.  The macro is
// now properly terminated before the invocations.
#define DO_INSTANTIATE(Label) \
  template void EncodeBlock<Label>( \
      const Label* input, const ptrdiff_t input_shape[3], \
      const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
      size_t base_offset, size_t* encoded_bits_output, \
      size_t* table_offset_output, EncodedValueCache<Label>* cache, \
      std::string* output); \
  template void EncodeChannel<Label>( \
      const Label* input, const ptrdiff_t input_shape[3], \
      const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
      std::string* output); \
  template void EncodeChannels<Label>( \
      const Label* input, const ptrdiff_t input_shape[3 + 1], \
      const ptrdiff_t input_byte_strides[3 + 1], \
      const ptrdiff_t block_shape[3], std::string* output); \
  template bool DecodeBlock( \
      size_t encoded_bits, const char* encoded_input, const char* table_input, \
      size_t table_size, const ptrdiff_t block_shape[3], \
      const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
      Label* output); \
  template bool DecodeChannel<Label>( \
      std::string_view input, const ptrdiff_t block_shape[3], \
      const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
      Label* output); \
  template bool DecodeChannels( \
      std::string_view input, const ptrdiff_t block_shape[3], \
      const ptrdiff_t output_shape[3 + 1], \
      const ptrdiff_t output_byte_strides[3 + 1], Label* output); \
  /**/
DO_INSTANTIATE(uint32_t)
DO_INSTANTIATE(uint64_t)
#undef DO_INSTANTIATE
}
} | #include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
namespace {
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodedValueCache;
// Reinterprets a byte string as its sequence of little-endian 32-bit words.
// The string length must be a multiple of 4 (checked via EXPECT).
std::vector<uint32_t> AsVec(std::string_view s) {
  EXPECT_EQ(0, s.size() % 4);
  const size_t num_words = s.size() / 4;
  std::vector<uint32_t> words;
  words.reserve(num_words);
  for (size_t word = 0; word < num_words; ++word) {
    words.push_back(absl::little_endian::Load32(s.data() + word * 4));
  }
  return words;
}
// Serializes 32-bit words into a little-endian byte string (inverse of
// AsVec).
std::string FromVec(std::vector<uint32_t> v) {
  std::string encoded(v.size() * 4, '\0');
  for (size_t i = 0; i < v.size(); ++i) {
    absl::little_endian::Store32(encoded.data() + i * 4, v[i]);
  }
  return encoded;
}
// Round-trips a single block through EncodeBlock/DecodeBlock.
//
// `input` is a dense C-order volume of `input_shape`; `block_shape` is the
// encoding block size.  Verifies the reported encoded-bits and table-offset
// header values, the encoded words (as 32-bit words appended after a 3-byte
// sentinel prefix, which is also checked for corruption), the value cache,
// and that decoding reproduces `input` exactly.
template <typename T>
void TestBlockRoundTrip(std::vector<T> input,
                        const std::ptrdiff_t (&input_shape)[3],
                        const std::ptrdiff_t (&block_shape)[3],
                        size_t expected_encoded_bits,
                        size_t expected_table_offset,
                        std::vector<uint32_t> expected_output,
                        EncodedValueCache<T> expected_cache) {
  std::string output{1, 2, 3};
  ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
  constexpr std::ptrdiff_t s = sizeof(T);
  // C-order byte strides for the dense input volume.
  const std::ptrdiff_t input_byte_strides[3] = {
      input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
  size_t encoded_bits;
  size_t table_offset;
  // Fix: the cache must match the label type T.  It was previously
  // hard-coded to EncodedValueCache<uint64_t>, which only compiled because
  // this helper happened to be instantiated solely with T = uint64_t.
  EncodedValueCache<T> cache;
  const size_t initial_offset = output.size();
  EncodeBlock(input.data(), input_shape, input_byte_strides, block_shape,
              initial_offset, &encoded_bits, &table_offset, &cache, &output);
  // The sentinel prefix must be untouched by the encoder.
  ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
  EXPECT_EQ(expected_encoded_bits, encoded_bits);
  EXPECT_EQ(expected_table_offset, table_offset);
  EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
  EXPECT_EQ(expected_cache, cache);
  // Decode back and compare with the original volume.
  std::vector<T> decoded_output(input.size());
  EXPECT_TRUE(DecodeBlock(
      encoded_bits, output.data() + initial_offset,
      output.data() + initial_offset + table_offset * 4,
      (output.size() - (initial_offset + table_offset * 4)) / sizeof(T),
      block_shape, input_shape, input_byte_strides, decoded_output.data()));
  EXPECT_EQ(input, decoded_output);
}
// Round-trips a single-channel volume through EncodeChannel/DecodeChannel.
//
// `expected_output` is the full encoded channel stream as 32-bit words
// (block header table followed by per-block data).  A 3-byte sentinel is
// prepended to the output buffer and checked for corruption.
template <typename T>
void TestSingleChannelRoundTrip(std::vector<T> input,
                                const std::ptrdiff_t (&input_shape)[3],
                                const std::ptrdiff_t (&block_shape)[3],
                                std::vector<uint32_t> expected_output) {
  std::string output{1, 2, 3};
  ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
  constexpr std::ptrdiff_t s = sizeof(T);
  // C-order byte strides for the dense input volume.
  const std::ptrdiff_t input_byte_strides[3] = {
      input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
  const size_t initial_offset = output.size();
  EncodeChannel(input.data(), input_shape, input_byte_strides, block_shape,
                &output);
  // The sentinel prefix must be untouched by the encoder.
  ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
  EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
  std::vector<T> decoded_output(input.size());
  // Decode from a separate copy so out-of-bounds reads are more likely to be
  // caught by sanitizers.
  std::vector<char> output_copy(output.begin() + initial_offset, output.end());
  EXPECT_TRUE(DecodeChannel(
      std::string_view(output_copy.data(), output_copy.size()), block_shape,
      input_shape, input_byte_strides, decoded_output.data()));
  EXPECT_EQ(input, decoded_output);
}
// Verifies that DecodeChannel rejects `input` as malformed for the given
// block/output geometry (returns false rather than decoding).
template <typename T>
void TestDecodeChannelError(std::string_view input,
                            const std::ptrdiff_t (&block_shape)[3],
                            const std::ptrdiff_t (&input_shape)[3]) {
  constexpr std::ptrdiff_t kElementSize = sizeof(T);
  // C-order byte strides for a dense output volume of `input_shape`.
  const std::ptrdiff_t strides[3] = {
      input_shape[1] * input_shape[2] * kElementSize,
      input_shape[2] * kElementSize, kElementSize};
  const std::ptrdiff_t num_elements =
      input_shape[0] * input_shape[1] * input_shape[2];
  std::vector<T> decoded(num_elements);
  EXPECT_FALSE(
      DecodeChannel(input, block_shape, input_shape, strides, decoded.data()));
}
// Round-trips a multi-channel volume through EncodeChannels/DecodeChannels.
//
// `expected_output` is the raw encoded byte stream (channel offset table
// followed by per-channel streams).  A 3-byte sentinel is prepended to the
// output buffer and checked for corruption.
template <typename T>
void TestMultipleChannelsRoundTripBytes(
    std::vector<T> input, const std::ptrdiff_t (&input_shape)[4],
    const std::ptrdiff_t (&block_shape)[4],
    std::vector<unsigned char> expected_output) {
  std::string output{1, 2, 3};
  ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3],
            input.size());
  constexpr std::ptrdiff_t s = sizeof(T);
  // C-order byte strides for the dense [channel, z, y, x] input volume.
  const std::ptrdiff_t input_byte_strides[4] = {
      input_shape[1] * input_shape[2] * input_shape[3] * s,
      input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
  const size_t initial_offset = output.size();
  EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
                 &output);
  // The sentinel prefix must be untouched by the encoder.
  ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
  EXPECT_THAT(output.substr(initial_offset),
              ::testing::ElementsAreArray(expected_output));
  std::vector<T> decoded_output(input.size());
  EXPECT_TRUE(DecodeChannels(output.substr(initial_offset), block_shape,
                             input_shape, input_byte_strides,
                             decoded_output.data()));
  EXPECT_EQ(input, decoded_output);
}
// All labels identical: 0 encoded bits, table holds the single label.
TEST(EncodeBlockTest, Basic0) {
  TestBlockRoundTrip<uint64_t>({3, 3, 3, 3},
                               {1, 2, 2},
                               {1, 2, 2},
                               0,
                               0,
                               {3, 0},
                               {{{3}, 0}});
}
// Two distinct labels: 1 encoded bit per value, 2-entry table.
TEST(EncodeBlockTest, Basic1) {
  TestBlockRoundTrip<uint64_t>(
      {4, 3, 4, 4},
      {1, 2, 2},
      {1, 2, 2},
      1,
      1,
      {0b1101, 3, 0, 4, 0},
      {{{3, 4}, 1}});
}
// Block shape larger than the input: clipped positions still encode.
TEST(EncodeBlockTest, SizeMismatch) {
  TestBlockRoundTrip<uint64_t>(
      {4, 3, 4, 3},
      {1, 2, 2},
      {1, 2, 3},
      1,
      1,
      {0b001001, 3, 0, 4, 0},
      {{{3, 4}, 1}});
}
// Three distinct labels: 2 encoded bits per value, 3-entry table.
TEST(EncodeBlockTest, Basic2) {
  TestBlockRoundTrip<uint64_t>(
      {4, 3, 5, 4},
      {1, 2, 2},
      {1, 2, 2},
      2,
      1,
      {0b01100001, 3, 0, 4, 0, 5, 0},
      {{{3, 4, 5}, 1}});
}
// Two blocks with distinct label sets: each gets its own table.
TEST(EncodeChannelTest, Basic) {
  TestSingleChannelRoundTrip<uint64_t>(
      {4, 3, 5, 4, 1, 3, 3, 3},
      {2, 2, 2},
      {1, 2, 2},
      {5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0,
       0b1110, 1, 0, 3, 0});
}
// Blocks with repeated label sets share a cached table (headers of blocks 2
// and 3 reuse the table offsets of blocks 1 and 0 respectively).
TEST(EncodeChannelTest, BasicCached) {
  TestSingleChannelRoundTrip<uint64_t>(
      {
          4, 3, 5, 4,
          1, 3, 3, 3,
          3, 1, 1, 1,
          5, 5, 3, 4,
      },
      {4, 2, 2},
      {1, 2, 2},
      {
          9 | (2 << 24),
          8,
          16 | (1 << 24),
          15,
          16 | (1 << 24),
          20,
          9 | (2 << 24),
          21,
          0b01100001,
          3,
          0,
          4,
          0,
          5,
          0,
          0b1110,
          1,
          0,
          3,
          0,
          0b00000001,
          0b01001010,
      });
}
// Constant volume: every block encodes with 0 bits and shares one table.
TEST(EncodeChannelTest, BasicCachedZeroBitsAtEnd) {
  TestSingleChannelRoundTrip<uint64_t>(
      {
          3, 3, 3, 3,
          3, 3, 3, 3,
          3, 3, 3, 3,
          3, 3, 3, 3,
      },
      {4, 2, 2},
      {1, 2, 2},
      {
          8 | (0 << 24),
          8,
          8 | (0 << 24),
          10,
          8 | (0 << 24),
          10,
          8 | (0 << 24),
          10,
          3,
          0,
      });
}
// Same as BasicCached but with 32-bit labels (one word per table entry).
TEST(EncodeChannelTest, BasicCached32) {
  TestSingleChannelRoundTrip<uint32_t>(
      {
          4, 3, 5, 4,
          1, 3, 3, 3,
          3, 1, 1, 1,
          5, 5, 3, 4,
      },
      {4, 2, 2},
      {1, 2, 2},
      {
          9 | (2 << 24),
          8,
          13 | (1 << 24),
          12,
          13 | (1 << 24),
          15,
          9 | (2 << 24),
          16,
          0b01100001,
          3,
          4,
          5,
          0b1110,
          1,
          3,
          0b00000001,
          0b01001010,
      });
}
// Single channel, single block: channel offset table plus one block stream,
// verified at byte granularity.
TEST(EncodeChannelsTest, Basic1Channel1Block) {
  TestMultipleChannelsRoundTripBytes<uint64_t>(
      {4, 0, 4, 0},
      {1, 1, 2, 2},
      {1, 2, 2},
      {
          1, 0, 0, 0,
          3, 0, 0, 1, 2, 0, 0, 0,
          0b0101, 0, 0, 0,
          0, 0, 0, 0,
          0, 0, 0, 0,
          4, 0, 0, 0,
          0, 0, 0, 0,
      });
}
// Input not word-aligned must be rejected.
TEST(DecodeChannelTest, SizeNotMultipleOf4) {
  auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  input.resize(input.size() - 1);
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Stream truncated by one word must be rejected.
TEST(DecodeChannelTest, Truncated) {
  auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  input.resize(input.size() - 4);
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// encoded_bits = 3 (header byte 3) is not a power of two.
TEST(DecodeChannelTest, NonPowerOf2EncodedBits) {
  auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// encoded_bits = 33 exceeds the 32-bit maximum.
TEST(DecodeChannelTest, MoreThan32EncodedBits) {
  auto input = FromVec({5 | (33 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Fewer words than the block header table requires.
TEST(DecodeChannelTest, MissingBlockHeaders) {
  auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24)});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Encoded-value offset (16) points past the end of the stream.
TEST(DecodeChannelTest, InvalidEncodedValueOffset) {
  auto input = FromVec({5 | (2 << 24), 16, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Table offset (16) points past the end of the stream.
TEST(DecodeChannelTest, InvalidTableOffset) {
  auto input = FromVec({16 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
                        4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Second block's encoded values are missing from the stream.
TEST(DecodeChannelTest, MissingEncodedValues) {
  auto input = FromVec(
      {5 | (2 << 24), 4, 0 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0});
  TestDecodeChannelError<uint64_t>(
      input,
      {1, 2, 2},
      {2, 2, 2});
}
// Fuzz-style round-trip test: generates random volumes with random shapes
// and label alphabets, then checks encode followed by decode reproduces the
// input exactly.
//
// max_block_size / max_input_size bound each block / volume dimension;
// max_channels bounds the channel count; max_distinct_ids bounds the label
// alphabet size; num_iterations is the number of random trials.
template <typename T>
void RandomRoundTrip(size_t max_block_size, size_t max_input_size,
                     size_t max_channels, size_t max_distinct_ids,
                     size_t num_iterations) {
  absl::BitGen gen;
  for (size_t iter = 0; iter < num_iterations; ++iter) {
    std::ptrdiff_t block_shape[3];
    std::ptrdiff_t input_shape[4];
    input_shape[0] = absl::Uniform(gen, 1u, max_channels + 1);
    for (int i = 0; i < 3; ++i) {
      block_shape[i] = absl::Uniform(gen, 1u, max_block_size + 1);
      input_shape[i + 1] = absl::Uniform(gen, 1u, max_input_size + 1);
    }
    std::vector<T> input(input_shape[0] * input_shape[1] * input_shape[2] *
                         input_shape[3]);
    // Draw a random alphabet of labels, then fill the volume from it.
    std::vector<T> labels(max_distinct_ids);
    for (auto& label : labels) {
      label = absl::Uniform<T>(gen);
    }
    for (auto& label : input) {
      label = labels[absl::Uniform(gen, 0u, labels.size())];
    }
    constexpr std::ptrdiff_t s = sizeof(T);
    // C-order byte strides for the dense [channel, z, y, x] volume.
    const std::ptrdiff_t input_byte_strides[4] = {
        input_shape[1] * input_shape[2] * input_shape[3] * s,
        input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
    std::string output;
    EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
                   &output);
    std::vector<T> decoded_output(input.size());
    EXPECT_TRUE(DecodeChannels(output, block_shape, input_shape,
                               input_byte_strides, decoded_output.data()));
    EXPECT_EQ(input, decoded_output);
  }
}
// Runs the randomized round-trip test for both supported label widths
// (uint32 first, then uint64), forwarding all bounds unchanged.
void RandomRoundTripBothDataTypes(size_t max_block_size, size_t max_input_size,
                                  size_t max_channels, size_t max_distinct_ids,
                                  size_t num_iterations) {
  const auto run = [&](auto label_type) {
    using T = decltype(label_type);
    RandomRoundTrip<T>(max_block_size, max_input_size, max_channels,
                       max_distinct_ids, num_iterations);
  };
  run(uint32_t{});
  run(uint64_t{});
}
// Arguments: max_block_size, max_input_size, max_channels, max_distinct_ids,
// num_iterations.  The second call uses a larger alphabet to exercise wider
// encoded-bits values.
TEST(RoundTripTest, Random) {
  RandomRoundTripBothDataTypes(4, 10,
                               3, 16,
                               100);
  RandomRoundTripBothDataTypes(10, 16,
                               3, 1000,
                               100);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/neuroglancer_compressed_segmentation.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/neuroglancer_compressed_segmentation_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
66a0bc89-4701-4131-b068-d96673fd7351 | cpp | google/tensorstore | zip_details | tensorstore/internal/compression/zip_details.cc | tensorstore/internal/compression/zip_details_test.cc | #include "tensorstore/internal/compression/zip_details.h"
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <ctime>
#include <ios>
#include <limits>
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "riegeli/bytes/limiting_reader.h"
#include "riegeli/bytes/prefix_limiting_reader.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bzip2/bzip2_reader.h"
#include "riegeli/endian/endian_reading.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zstd/zstd_reader.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/riegeli/find.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zip {
namespace {
using ::riegeli::ReadLittleEndian16;
using ::riegeli::ReadLittleEndian32;
using ::riegeli::ReadLittleEndian64;
using ::riegeli::ReadLittleEndianSigned64;
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip_details");
const absl::Time kWindowsEpoch =
::absl::UnixEpoch() - ::absl::Seconds(11644473600);
// Converts an MS-DOS date/time pair (as stored in ZIP headers) to an
// absl::Time interpreted in UTC.
//
// MS-DOS packing:
//   date: bits 0-4 day (1-31), bits 5-8 month (1-12), bits 9-15 year-1980.
//   time: bits 11-15 hour, bits 5-10 minute, bits 0-4 seconds/2.
absl::Time MakeMSDOSTime(uint16_t date, uint16_t time) {
  // Zero-initialize so members not assigned below (tm_wday, tm_yday, and any
  // implementation-specific fields) hold defined values rather than
  // indeterminate ones.
  struct tm dos_tm = {};
  dos_tm.tm_mday = (uint16_t)(date & 0x1f);
  dos_tm.tm_mon = (uint16_t)((date >> 5) & 0xf) - 1;   // tm_mon is 0-based.
  dos_tm.tm_year = (uint16_t)(date >> 9) + 80;         // years since 1900.
  dos_tm.tm_hour = (uint16_t)(time >> 11);
  dos_tm.tm_min = (uint16_t)((time >> 5) & 0x1f);
  dos_tm.tm_sec = (uint16_t)(2 * (time & 0x1f));       // 2-second resolution.
  dos_tm.tm_isdst = -1;
  return absl::FromTM(dos_tm, absl::UTCTimeZone());
}
// Parses the ZIP64 extended-information extra field (tag 0x0001).
//
// Per the ZIP spec, each 64-bit value is present only when the
// corresponding 32-bit header field is the 0xFFFFFFFF sentinel, in the
// fixed order: uncompressed size, compressed size, local header offset.
// `tag_size` is the declared payload size of the field.
absl::Status ReadExtraField_Zip64_0001(riegeli::Reader &reader,
                                       uint16_t tag_size, ZipEntry &entry) {
  assert(tag_size >= 8);
  entry.is_zip64 = true;
  // do/while(false) lets any short read `break` to the shared error return.
  do {
    if (tag_size >= 8 &&
        entry.uncompressed_size == std::numeric_limits<uint32_t>::max()) {
      if (!ReadLittleEndian64(reader, entry.uncompressed_size)) break;
      tag_size -= 8;
    }
    if (tag_size >= 8 &&
        entry.compressed_size == std::numeric_limits<uint32_t>::max()) {
      if (!ReadLittleEndian64(reader, entry.compressed_size)) break;
      tag_size -= 8;
    }
    if (tag_size >= 8 &&
        entry.local_header_offset == std::numeric_limits<uint32_t>::max()) {
      if (!ReadLittleEndian64(reader, entry.local_header_offset)) break;
      tag_size -= 8;
    }
    return absl::OkStatus();
  } while (false);
  return absl::InvalidArgumentError("Failed to read ZIP64 extra field");
}
// Parses the PKWARE UNIX extra field (tag 0x000D): access time, modification
// time, then a further 32-bit word (uid/gid) that is read and discarded.
// Timestamps are Unix epoch seconds.
absl::Status ReadExtraField_Unix_000D(riegeli::Reader &reader,
                                      uint16_t tag_size, ZipEntry &entry) {
  assert(tag_size >= 12);
  uint32_t ignored32;
  uint32_t mtime;
  uint32_t atime;
  if (!ReadLittleEndian32(reader, atime) ||
      !ReadLittleEndian32(reader, mtime) ||
      !ReadLittleEndian32(reader, ignored32) ) {
    return absl::InvalidArgumentError("Failed to read UNIX extra field");
  }
  entry.atime = absl::FromUnixSeconds(atime);
  entry.mtime = absl::FromUnixSeconds(mtime);
  return absl::OkStatus();
}
// Parses the NTFS extra field (tag 0x000A): a reserved 32-bit word followed
// by (tag, size) sub-blocks.  Sub-block 0x0001 carries mtime/atime/ctime as
// 100ns ticks since the Windows epoch (1601-01-01); other sub-blocks are
// skipped.  Note the ctime value is read but not stored on `entry`.
absl::Status ReadExtraField_NTFS_000A(riegeli::Reader &reader,
                                      uint16_t tag_size, ZipEntry &entry) {
  assert(tag_size >= 8);
  uint32_t ignored32;
  if (!ReadLittleEndian32(reader, ignored32)) {
    return absl::InvalidArgumentError("Failed to read NTFS extra field");
  }
  tag_size -= 4;
  uint16_t ntfs_tag, ntfs_size;
  while (tag_size > 4) {
    if (!ReadLittleEndian16(reader, ntfs_tag) ||
        !ReadLittleEndian16(reader, ntfs_size)) {
      break;
    }
    tag_size -= 4;
    tag_size -= ntfs_size;
    if (ntfs_tag == 0x0001 && ntfs_size == 24) {
      uint64_t mtime;
      uint64_t atime;
      uint64_t ctime;
      if (!ReadLittleEndian64(reader, mtime) ||
          !ReadLittleEndian64(reader, atime) ||
          !ReadLittleEndian64(reader, ctime)) {
        return absl::InvalidArgumentError("Failed to read NTFS extra field");
      }
      entry.mtime = kWindowsEpoch + absl::Nanoseconds(mtime * 100);
      entry.atime = kWindowsEpoch + absl::Nanoseconds(atime * 100);
    } else {
      reader.Skip(ntfs_size);
    }
  }
  return absl::OkStatus();
}
// Parses the extended-timestamp extra field (tag 0x5455).  A flags byte
// selects which Unix-epoch timestamps follow: bit 0 = mtime, bit 1 = atime,
// bit 2 = ctime (read but discarded).  Each timestamp is only consumed if
// enough declared payload remains.
absl::Status ReadExtraField_Unix_5455(riegeli::Reader &reader,
                                      uint16_t tag_size, ZipEntry &entry) {
  assert(tag_size >= 1);
  uint8_t flags = 0;
  uint32_t tstamp = 0;
  // do/while(false) lets any short read `break` to the shared error return.
  do {
    if (!reader.ReadByte(flags)) break;
    --tag_size;
    if (flags & 0x01 && tag_size >= 4) {
      if (!ReadLittleEndian32(reader, tstamp)) break;
      tag_size -= 4;
      entry.mtime = absl::FromUnixSeconds(tstamp);
    }
    if (flags & 0x02 && tag_size >= 4) {
      if (!ReadLittleEndian32(reader, tstamp)) break;
      tag_size -= 4;
      entry.atime = absl::FromUnixSeconds(tstamp);
    }
    if (flags & 0x04 && tag_size >= 4) {
      // ctime: consumed but not stored.
      if (!ReadLittleEndian32(reader, tstamp)) break;
      tag_size -= 4;
    }
    return absl::OkStatus();
  } while (false);
  return absl::InvalidArgumentError(
      "Failed to read unix timestamp extra field");
}
// Iterates over the (tag, size, payload) records of a ZIP extra-field blob,
// dispatching known tags to their parsers.  Running out of input at a record
// boundary is normal termination; per-tag parse failures are accumulated
// into `status`.  `reader` is expected to be limited to the extra-field
// region by the caller.
absl::Status ReadExtraField(riegeli::Reader &reader, ZipEntry &entry) {
  uint16_t tag, tag_size;
  absl::Status status;
  while (reader.ok()) {
    if (!ReadLittleEndian16(reader, tag) ||
        !ReadLittleEndian16(reader, tag_size)) {
      return absl::OkStatus();
    }
    ABSL_LOG_IF(INFO, zip_logging)
        << std::hex << "extra tag " << tag << " size " << tag_size;
    auto pos = reader.pos();
    switch (tag) {
      case 0x0001:  // ZIP64 extended information.
        status.Update(ReadExtraField_Zip64_0001(reader, tag_size, entry));
        break;
      case 0x000d:  // PKWARE UNIX.
        status.Update(ReadExtraField_Unix_000D(reader, tag_size, entry));
        break;
      case 0x000a:  // NTFS timestamps.
        status.Update(ReadExtraField_NTFS_000A(reader, tag_size, entry));
        break;
      case 0x5455:  // Extended timestamp.
        status.Update(ReadExtraField_Unix_5455(reader, tag_size, entry));
        break;
      case 0x7875:  // Info-ZIP new Unix uid/gid: ignored.
        break;
      default:  // Unknown tags are skipped via the Seek below.
        break;
    }
    assert(reader.pos() <= pos + tag_size);
    // Always advance to the end of the record, regardless of how much the
    // per-tag parser consumed.
    reader.Seek(pos + tag_size);
  }
  return status;
}
}
// Reads the ZIP64 End-of-Central-Directory Locator record (signature
// 0x07064b50), which precedes the EOCD record and points at the ZIP64 EOCD.
// The trailing total-disk-count field is read and discarded.
absl::Status ReadEOCD64Locator(riegeli::Reader &reader,
                               ZipEOCD64Locator &locator) {
  if (!reader.Pull(ZipEOCD64Locator::kRecordSize)) {
    return absl::InvalidArgumentError(
        "ZIP EOCD64 Locator Entry insufficient data available");
  }
  uint32_t signature;
  ReadLittleEndian32(reader, signature);
  if (signature != 0x07064b50) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "Failed to read ZIP64 End of Central Directory Locator signature %08x",
        signature));
  }
  uint32_t ignored32;
  ReadLittleEndian32(reader, locator.disk_number_with_cd);
  ReadLittleEndianSigned64(reader, locator.cd_offset);
  ReadLittleEndian32(reader, ignored32);
  // A negative offset indicates a failed/overflowed read.
  if (locator.cd_offset < 0) {
    ABSL_LOG_IF(INFO, zip_logging && !reader.ok()) << reader.status();
    return absl::InvalidArgumentError(
        "Failed to read ZIP64 End of Central Directory Locator");
  }
  return absl::OkStatus();
}
// Reads the ZIP64 End-of-Central-Directory record (signature 0x06064b50)
// into `eocd`.  Multi-disk archives and values that still carry the 16/32
// bit sentinels are rejected.
absl::Status ReadEOCD64(riegeli::Reader &reader, ZipEOCD &eocd) {
  if (!reader.Pull(ZipEOCD::kEOCD64RecordSize)) {
    return absl::InvalidArgumentError(
        "ZIP EOCD Entry insufficient data available");
  }
  auto eocd_pos = reader.pos();
  uint32_t signature;
  ReadLittleEndian32(reader, signature);
  if (signature != 0x06064b50) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP64 Central Directory Entry signature");
  }
  // "size of zip64 end of central directory record": bytes following this
  // field; the fixed portion after it is 44 bytes.
  uint64_t eocd_size;
  ReadLittleEndian64(reader, eocd_size);
  if (eocd_size < 44 || !reader.Pull(eocd_size)) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP64 End of Central Directory");
  }
  // Bound all remaining reads to the declared record size.
  riegeli::LimitingReader oecd64_reader(
      &reader,
      riegeli::LimitingReaderBase::Options().set_exact_length(eocd_size));
  uint16_t version_madeby;
  uint16_t version_needed_to_extract;
  uint32_t disk_number;
  uint32_t disk_number_with_cd;
  uint64_t total_num_entries;
  ReadLittleEndian16(oecd64_reader, version_madeby);
  ReadLittleEndian16(oecd64_reader, version_needed_to_extract);
  ReadLittleEndian32(oecd64_reader, disk_number);
  ReadLittleEndian32(oecd64_reader, disk_number_with_cd);
  ReadLittleEndian64(oecd64_reader, eocd.num_entries);
  ReadLittleEndian64(oecd64_reader, total_num_entries);
  ReadLittleEndianSigned64(oecd64_reader, eocd.cd_size);
  ReadLittleEndianSigned64(oecd64_reader, eocd.cd_offset);
  // Reject multi-disk archives, leftover 16/32-bit sentinel values, and
  // negative (failed/overflowed) reads.
  if (disk_number != disk_number_with_cd ||
      eocd.num_entries != total_num_entries ||
      eocd.num_entries == std::numeric_limits<uint16_t>::max() ||
      eocd.cd_size == std::numeric_limits<uint16_t>::max() ||
      eocd.cd_offset == std::numeric_limits<uint32_t>::max() ||
      eocd.cd_size < 0 || eocd.cd_offset < 0) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP64 End of Central Directory");
  }
  // Skip any extensible data sector at the end of the record.
  oecd64_reader.Seek(eocd_size);
  eocd.record_offset = eocd_pos;
  return absl::OkStatus();
}
// Reads the classic End-of-Central-Directory record (signature 0x06054b50)
// into `eocd`.  Multi-disk archives are rejected.  If the record carries
// 16/32-bit sentinel values, `eocd.cd_offset` is set to the uint32 max
// sentinel to signal the caller that the ZIP64 EOCD must be consulted.
absl::Status ReadEOCD(riegeli::Reader &reader, ZipEOCD &eocd) {
  if (!reader.Pull(ZipEOCD::kEOCDRecordSize)) {
    return absl::InvalidArgumentError(
        "ZIP EOCD Entry insufficient data available");
  }
  auto eocd_pos = reader.pos();
  uint32_t signature;
  ReadLittleEndian32(reader, signature);
  if (signature != 0x06054b50) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Central Directory Entry signature");
  }
  uint16_t disk_number;
  uint16_t disk_number_with_cd;
  uint16_t num_entries;
  uint16_t total_num_entries;
  uint32_t cd_size;
  uint32_t cd_offset;
  uint16_t comment_length;
  ReadLittleEndian16(reader, disk_number);
  ReadLittleEndian16(reader, disk_number_with_cd);
  ReadLittleEndian16(reader, num_entries);
  ReadLittleEndian16(reader, total_num_entries);
  ReadLittleEndian32(reader, cd_size);
  ReadLittleEndian32(reader, cd_offset);
  ReadLittleEndian16(reader, comment_length);
  if (num_entries != total_num_entries) {
    ABSL_LOG(INFO) << "ZIP num_entries mismatch " << num_entries << " vs "
                   << total_num_entries;
    return absl::InvalidArgumentError(
        "Failed to read ZIP End of Central Directory");
  }
  if (disk_number != disk_number_with_cd) {
    ABSL_LOG(INFO) << "ZIP disk_number mismatch " << disk_number << " vs "
                   << disk_number_with_cd;
    return absl::InvalidArgumentError(
        "Failed to read ZIP End of Central Directory");
  }
  if (comment_length > 0 && !reader.Read(comment_length, eocd.comment)) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP End of Central Directory");
  }
  // The EOCD record must be the last thing in the stream.
  reader.VerifyEnd();
  if (!reader.status().ok()) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP End of Central Directory");
  }
  eocd.record_offset = eocd_pos;
  eocd.num_entries = num_entries;
  eocd.cd_size = cd_size;
  eocd.cd_offset = cd_offset;
  // Sentinel values mean the real values live in the ZIP64 EOCD; propagate
  // the uint32 max sentinel in cd_offset so the caller knows to look there.
  if (total_num_entries == std::numeric_limits<uint16_t>::max() ||
      cd_offset == std::numeric_limits<uint32_t>::max()) {
    eocd.cd_offset = std::numeric_limits<uint32_t>::max();
  }
  return absl::OkStatus();
}
// Locates and reads the EOCD (and, if needed, the ZIP64 EOCD) from `reader`,
// which typically covers a suffix of the archive starting
// `offset_adjustment` bytes into the file.
//
// Returns OkStatus on success, an error status on malformed input, or an
// int64_t file offset when the ZIP64 EOCD lies before the data available in
// `reader` — the caller should re-issue a read starting at that offset.
std::variant<absl::Status, int64_t> TryReadFullEOCD(riegeli::Reader &reader,
                                                    ZipEOCD &eocd,
                                                    int64_t offset_adjustment) {
  // Scan backwards for the last EOCD signature (comments may contain fakes
  // earlier in the stream).
  if (!internal::FindLast(
          reader, std::string_view(reinterpret_cast<const char *>(kEOCDLiteral),
                                   sizeof(kEOCDLiteral)))) {
    return absl::InvalidArgumentError("Failed to find valid ZIP EOCD");
  }
  int64_t eocd_start = reader.pos();
  ZipEOCD last_eocd{};
  TENSORSTORE_RETURN_IF_ERROR(ReadEOCD(reader, last_eocd));
  // No ZIP64 sentinel: the classic EOCD is authoritative.
  if (last_eocd.cd_offset != std::numeric_limits<uint32_t>::max()) {
    eocd = last_eocd;
    reader.Seek(eocd_start + 4);
    return absl::OkStatus();
  }
  // ZIP64: the EOCD64 locator sits immediately before the EOCD record.
  if (eocd_start < ZipEOCD64Locator::kRecordSize) {
    return absl::InvalidArgumentError("Block does not contain EOCD64 Locator");
  }
  if (!reader.Seek(eocd_start - ZipEOCD64Locator::kRecordSize)) {
    if (!reader.ok() && !reader.status().ok()) {
      return MaybeAnnotateStatus(reader.status(),
                                 "Failed to read EOCD64 Locator");
    }
    return absl::InvalidArgumentError("Failed to read EOCD64 Locator");
  }
  ZipEOCD64Locator locator;
  TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64Locator(reader, locator));
  // Caller did not say where this block sits in the file; hand back the
  // absolute EOCD64 offset so it can fetch the right range.
  if (offset_adjustment < 0) {
    return locator.cd_offset;
  }
  // Translate the absolute EOCD64 offset into this block's coordinates; if
  // it falls before the block, ask the caller for more data.
  auto target_pos = locator.cd_offset - offset_adjustment;
  if (target_pos < 0) {
    assert(offset_adjustment > 0);
    return locator.cd_offset;
  }
  if (!reader.Seek(target_pos)) {
    if (!reader.ok() && !reader.status().ok()) {
      return MaybeAnnotateStatus(reader.status(), "Failed to read EOCD64");
    }
    return absl::InvalidArgumentError("Failed to read EOCD64");
  }
  TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64(reader, last_eocd));
  eocd = last_eocd;
  reader.Seek(eocd_start + 4);
  return absl::OkStatus();
}
// Reads one Central Directory file header (signature 0x02014b50) into
// `entry`, including the variable-length filename, extra fields, and
// comment.  Sentinel 32-bit sizes/offsets may be replaced by values from a
// ZIP64 extra field during ReadExtraField.
absl::Status ReadCentralDirectoryEntry(riegeli::Reader &reader,
                                       ZipEntry &entry) {
  if (!reader.Pull(ZipEntry::kCentralRecordSize)) {
    return absl::InvalidArgumentError(
        "ZIP Central Directory Entry insufficient data available");
  }
  uint32_t signature;
  ReadLittleEndian32(reader, signature);
  if (signature != 0x02014b50) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Central Directory Entry signature");
  }
  uint32_t uncompressed_size = 0;
  uint32_t compressed_size;
  uint32_t relative_header_offset = 0;
  uint16_t file_name_length = 0;
  uint16_t extra_field_length = 0;
  uint16_t file_comment_length = 0;
  uint16_t last_mod_time;
  uint16_t last_mod_date;
  uint16_t ignored16;
  uint16_t compression_method;
  // Fixed-size portion, in wire order.
  ReadLittleEndian16(reader, entry.version_madeby);
  ReadLittleEndian16(reader, ignored16);  // version needed to extract.
  ReadLittleEndian16(reader, entry.flags);
  ReadLittleEndian16(reader, compression_method);
  ReadLittleEndian16(reader, last_mod_time);
  ReadLittleEndian16(reader, last_mod_date);
  ReadLittleEndian32(reader, entry.crc);
  ReadLittleEndian32(reader, compressed_size);
  ReadLittleEndian32(reader, uncompressed_size);
  ReadLittleEndian16(reader, file_name_length);
  ReadLittleEndian16(reader, extra_field_length);
  ReadLittleEndian16(reader, file_comment_length);
  ReadLittleEndian16(reader, ignored16);  // disk number start.
  ReadLittleEndian16(reader, entry.internal_fa);
  ReadLittleEndian32(reader, entry.external_fa);
  ReadLittleEndian32(reader, relative_header_offset);
  entry.compressed_size = compressed_size;
  entry.uncompressed_size = uncompressed_size;
  entry.local_header_offset = relative_header_offset;
  entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time);
  entry.compression_method = static_cast<ZipCompression>(compression_method);
  if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Central Directory Entry (filename)");
  }
  assert(entry.filename.size() == file_name_length);
  if (extra_field_length > 0) {
    // NOTE(review): asserts a minimum extra-field size of >4 bytes; a 1-4
    // byte extra field would trip this in debug builds — confirm intent.
    assert(extra_field_length > 4);
    riegeli::LimitingReader extra_reader(
        &reader, riegeli::LimitingReaderBase::Options().set_exact_length(
                     extra_field_length));
    extra_reader.SetReadAllHint(true);
    if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) {
      return status;
    }
    extra_reader.Seek(extra_field_length);
  }
  if (file_comment_length > 0 &&
      !reader.Read(file_comment_length, entry.comment)) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Central Directory Entry (comment)");
  }
  entry.end_of_header_offset = reader.pos();
  // Upper bound on the bytes to fetch to read this entry's local header plus
  // data (including a possible data descriptor).
  entry.estimated_read_size =
      std::max(entry.compressed_size, entry.uncompressed_size) +
      file_name_length + extra_field_length + ZipEntry::kLocalRecordSize +
      (entry.flags & kHasDataDescriptor ? 12 : 0);
  return absl::OkStatus();
}
// Reads one Local File Header (signature 0x04034b50) into `entry`.  Fields
// that exist only in the central directory (version_madeby, attributes,
// offsets) are zeroed.  Sentinel 32-bit sizes may be replaced by values from
// a ZIP64 extra field during ReadExtraField.
absl::Status ReadLocalEntry(riegeli::Reader &reader, ZipEntry &entry) {
  if (!reader.Pull(ZipEntry::kLocalRecordSize)) {
    return absl::InvalidArgumentError(
        "ZIP Local Entry insufficient data available");
  }
  uint32_t signature;
  ReadLittleEndian32(reader, signature);
  if (signature != 0x04034b50) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Local Entry signature");
  }
  uint16_t ignored16;
  uint16_t compression_method;
  uint16_t last_mod_time;
  uint16_t last_mod_date;
  uint32_t uncompressed_size;
  uint32_t compressed_size;
  uint16_t file_name_length = 0;
  uint16_t extra_field_length = 0;
  // Fixed-size portion, in wire order.
  ReadLittleEndian16(reader, ignored16);  // version needed to extract.
  ReadLittleEndian16(reader, entry.flags);
  ReadLittleEndian16(reader, compression_method);
  ReadLittleEndian16(reader, last_mod_time);
  ReadLittleEndian16(reader, last_mod_date);
  ReadLittleEndian32(reader, entry.crc);
  ReadLittleEndian32(reader, compressed_size);
  ReadLittleEndian32(reader, uncompressed_size);
  ReadLittleEndian16(reader, file_name_length);
  ReadLittleEndian16(reader, extra_field_length);
  // Central-directory-only fields are not present in a local header.
  entry.version_madeby = 0;
  entry.internal_fa = 0;
  entry.external_fa = 0;
  entry.local_header_offset = 0;
  entry.estimated_read_size = 0;
  entry.compressed_size = compressed_size;
  entry.uncompressed_size = uncompressed_size;
  entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time);
  entry.compression_method = static_cast<ZipCompression>(compression_method);
  if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) {
    return absl::InvalidArgumentError(
        "Failed to read ZIP Local Entry (filename)");
  }
  assert(entry.filename.size() == file_name_length);
  entry.end_of_header_offset = reader.pos() + extra_field_length;
  if (extra_field_length > 0) {
    // NOTE(review): asserts a minimum extra-field size of >4 bytes; a 1-4
    // byte extra field would trip this in debug builds — confirm intent.
    assert(extra_field_length > 4);
    riegeli::LimitingReader extra_reader(
        &reader, riegeli::LimitingReaderBase::Options().set_exact_length(
                     extra_field_length));
    extra_reader.SetReadAllHint(true);
    if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) {
      return status;
    }
    extra_reader.Seek(extra_field_length);
  }
  return absl::OkStatus();
}
// Checks whether `entry` can be decoded by this implementation.
//
// Rejects: any form of encryption (general-purpose flag bit 0 = traditional,
// bit 6 = strong encryption, bit 13 = masked local header, or the AES
// method marker), compression methods other than the supported set, and
// directory entries (names ending in '/').
absl::Status ValidateEntryIsSupported(const ZipEntry &entry) {
  const bool is_encrypted =
      (entry.flags & 0x01) || (entry.flags & (uint16_t{1} << 6)) ||
      (entry.flags & (uint16_t{1} << 13)) ||
      entry.compression_method == ZipCompression::kAes;
  if (is_encrypted) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("ZIP encryption is not supported"));
  }
  switch (entry.compression_method) {
    case ZipCompression::kStore:
    case ZipCompression::kDeflate:
    case ZipCompression::kBzip2:
    case ZipCompression::kZStd:
    case ZipCompression::kXZ:
      break;
    default:
      return absl::InvalidArgumentError(
          tensorstore::StrCat("ZIP compression method ",
                              entry.compression_method, " is not supported"));
  }
  if (absl::EndsWith(entry.filename, "/")) {
    return absl::InvalidArgumentError("ZIP directory entries cannot be read");
  }
  return absl::OkStatus();
}
// Returns a reader over exactly the compressed payload of `entry`, with
// `reader` positioned at the start of that payload.
//
// When the entry uses a trailing data descriptor (kHasDataDescriptor), the
// local header's crc/size fields may be zero; in that case this first skips
// ahead to parse the descriptor (signature 0x08074b50, 32- or 64-bit sizes
// depending on is_zip64), fills in any zero fields on `entry`, and seeks
// back to the payload start.
tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetRawReader(
    riegeli::Reader *reader, ZipEntry &entry) {
  assert(reader != nullptr);
  if (entry.flags & kHasDataDescriptor) {
    const auto start_pos = reader->pos();
    if (!reader->Skip(entry.compressed_size)) {
      return reader->status();
    }
    static constexpr size_t kZipDataDescriptorSize = 16;
    static constexpr size_t kZip64DataDescriptorSize = 24;
    if (!reader->Pull(entry.is_zip64 ? kZip64DataDescriptorSize
                                     : kZipDataDescriptorSize)) {
      return absl::DataLossError("Failed to read ZIP DataDescriptor");
    }
    uint32_t signature, crc32;
    ReadLittleEndian32(*reader, signature);
    ReadLittleEndian32(*reader, crc32);
    if (signature != 0x08074b50) {
      return absl::DataLossError(absl::StrFormat(
          "Failed to read ZIP DataDescriptor signature %08x", signature));
    }
    // Only fill in fields the header left as zero.
    if (entry.crc == 0) entry.crc = crc32;
    if (entry.is_zip64) {
      uint64_t compressed_size, uncompressed_size;
      ReadLittleEndian64(*reader, compressed_size);
      ReadLittleEndian64(*reader, uncompressed_size);
      if (entry.compressed_size == 0) entry.compressed_size = compressed_size;
      if (entry.uncompressed_size == 0)
        entry.uncompressed_size = uncompressed_size;
    } else {
      uint32_t compressed_size, uncompressed_size;
      ReadLittleEndian32(*reader, compressed_size);
      ReadLittleEndian32(*reader, uncompressed_size);
      if (entry.compressed_size == 0) {
        entry.compressed_size = compressed_size;
      }
      if (entry.uncompressed_size == 0) {
        entry.uncompressed_size = uncompressed_size;
      }
    }
    // Rewind to the start of the compressed payload.
    if (!reader->Seek(start_pos)) {
      return reader->status();
    }
  }
  // Restrict the returned reader to exactly the compressed payload.
  using Reader = riegeli::LimitingReader<riegeli::Reader *>;
  return std::make_unique<Reader>(
      reader, riegeli::LimitingReaderBase::Options().set_exact_length(
                  entry.compressed_size));
}
/// Wraps `reader` (positioned at the entry's compressed payload) in a
/// decompressing reader appropriate for `entry.compression_method`.
/// Returns InvalidArgumentError for methods with no decoder here.
tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetReader(
    riegeli::Reader *reader, ZipEntry &entry) {
  // First bound the stream to exactly the compressed payload.
  TENSORSTORE_ASSIGN_OR_RETURN(std::unique_ptr<riegeli::Reader> base_reader,
                               GetRawReader(reader, entry));
  switch (entry.compression_method) {
    case ZipCompression::kStore: {
      // Uncompressed: pass the bytes through; base_pos makes reported
      // positions relative to the start of the entry data.
      using PLReader =
          riegeli::PrefixLimitingReader<std::unique_ptr<riegeli::Reader>>;
      return std::make_unique<PLReader>(
          std::move(base_reader),
          PLReader::Options().set_base_pos(reader->pos()));
    }
    case ZipCompression::kDeflate: {
      // ZIP stores raw deflate streams (no zlib header/trailer).
      using DeflateReader =
          riegeli::ZlibReader<std::unique_ptr<riegeli::Reader>>;
      return std::make_unique<DeflateReader>(
          std::move(base_reader),
          DeflateReader::Options().set_header(DeflateReader::Header::kRaw));
    }
    case ZipCompression::kBzip2: {
      using Bzip2Reader =
          riegeli::Bzip2Reader<std::unique_ptr<riegeli::Reader>>;
      return std::make_unique<Bzip2Reader>(std::move(base_reader));
    }
    case ZipCompression::kZStd: {
      using ZStdReader = riegeli::ZstdReader<std::unique_ptr<riegeli::Reader>>;
      return std::make_unique<ZStdReader>(std::move(base_reader));
    }
    case ZipCompression::kXZ: {
      // set_concatenate(true) tolerates multiple concatenated xz streams.
      using XzReader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>;
      return std::make_unique<XzReader>(
          std::move(base_reader), XzReader::Options()
                                      .set_container(XzReader::Container::kXz)
                                      .set_concatenate(true)
      );
    }
    default:
      break;
  }
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "Unsupported ZIP compression method ", entry.compression_method));
}
}
} | #include "tensorstore/internal/compression/zip_details.h"
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/string_reader.h"
#include "tensorstore/internal/riegeli/find.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::FindFirst;
using ::tensorstore::internal::StartsWith;
using ::tensorstore::internal_zip::kCentralHeaderLiteral;
using ::tensorstore::internal_zip::kEOCDLiteral;
using ::tensorstore::internal_zip::kLocalHeaderLiteral;
using ::tensorstore::internal_zip::ReadCentralDirectoryEntry;
using ::tensorstore::internal_zip::ReadEOCD;
using ::tensorstore::internal_zip::ReadEOCD64Locator;
using ::tensorstore::internal_zip::ReadLocalEntry;
using ::tensorstore::internal_zip::TryReadFullEOCD;
using ::tensorstore::internal_zip::ZipCompression;
using ::tensorstore::internal_zip::ZipEntry;
using ::tensorstore::internal_zip::ZipEOCD;
using ::tensorstore::internal_zip::ZipEOCD64Locator;
using ::tensorstore::internal_zip::kCentralHeaderLiteral;
using ::tensorstore::internal_zip::kEOCD64Literal;
using ::tensorstore::internal_zip::kEOCD64LocatorLiteral;
using ::tensorstore::internal_zip::kEOCDLiteral;
using ::tensorstore::internal_zip::kLocalHeaderLiteral;
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
// Reads the test archive named by --tensorstore_test_data from disk.
// Aborts (ABSL_CHECK) if the flag is unset, the read fails, or the file
// size differs from the expected fixture size.
absl::Cord GetTestZipFileData() {
  ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
  absl::Cord filedata;
  TENSORSTORE_CHECK_OK(riegeli::ReadAll(
      riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
  ABSL_CHECK_EQ(filedata.size(), 319482);
  return filedata;
}
// Smallest valid ZIP file: a bare end-of-central-directory (EOCD) record
// describing an archive with zero entries.
static constexpr unsigned char kMinimalZip[] = {
    0x50, 0x4b, 0x5, 0x6,  // EOCD signature "PK\x05\x06"
    0x0, 0x0, 0x0, 0x0,    // disk number, disk containing the CD
    0x0, 0x0, 0x0, 0x0,    // entries on this disk, total entries
    0x0, 0x0, 0x0, 0x0,    // central directory size
    0x0, 0x0, 0x0, 0x0,    // central directory offset
    0x0, 0x0};             // comment length
static constexpr unsigned char kZip64OneEmptyFile[] = {
0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x72,
0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x01, 0x00, 0x14, 0x00, 0x2d, 0x01, 0x00, 0x10, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x61, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x4f, 0x72, 0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0x02, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x80, 0x11, 0x00, 0x00, 0x00, 0x00, 0x2d,
0x50, 0x4b, 0x06, 0x06, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x50, 0x4b, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00,
0x2f, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static constexpr unsigned char kZipTest2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54,
0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x04, 0x00, 0x64, 0x00, 0x14, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98,
0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x9a,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x0d, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03,
0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00,
0xe8, 0x03, 0x64, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x3c, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0x09,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x77, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55,
0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
0xca, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
};
// Returns a std::string_view covering the entire fixed-size byte array
// `str` (including any embedded NUL bytes).
template <size_t N>
std::string_view StringViewOf(const unsigned char (&str)[N]) {
  const char* bytes = reinterpret_cast<const char*>(&str[0]);
  return {bytes, N};
}
// Verifies that the EOCD record of an empty archive (zero entries) parses
// with all fields zero.
TEST(ZipDetailsTest, DecodeEOCD) {
  riegeli::StringReader string_reader(StringViewOf(kMinimalZip));
  EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
  ZipEOCD eocd;
  ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
  EXPECT_EQ(eocd.num_entries, 0);
  EXPECT_EQ(eocd.cd_size, 0);
  EXPECT_EQ(eocd.cd_offset, 0);
}
TEST(ZipDetailsTest, ReadEOCDZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
ZipEOCD eocd;
ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
EXPECT_EQ(eocd.num_entries, 1);
EXPECT_EQ(eocd.cd_size, 47);
EXPECT_EQ(eocd.cd_offset, 53);
}
TEST(ZipDetailsTest, ReadEOCD6LocatorZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64LocatorLiteral)));
ZipEOCD64Locator eocd64_locator;
ASSERT_THAT(ReadEOCD64Locator(string_reader, eocd64_locator),
::tensorstore::IsOk());
EXPECT_EQ(eocd64_locator.disk_number_with_cd, 0);
EXPECT_EQ(eocd64_locator.cd_offset, 100);
}
TEST(ZipDetailsTest, ReadEOCD64Zip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal)));
EXPECT_EQ(100, string_reader.pos());
ZipEOCD eocd64;
ASSERT_THAT(ReadEOCD64(string_reader, eocd64), ::tensorstore::IsOk());
EXPECT_EQ(eocd64.num_entries, 1);
EXPECT_EQ(eocd64.cd_size, 47);
EXPECT_EQ(eocd64.cd_offset, 53);
}
TEST(ZipDetailsTest, TryReadFullEOCDZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal)));
EXPECT_EQ(100, string_reader.pos());
ZipEOCD eocd64;
ASSERT_THAT(TryReadFullEOCD(string_reader, eocd64, 0),
::testing::VariantWith<absl::Status>(::tensorstore::IsOk()));
EXPECT_EQ(eocd64.num_entries, 1);
EXPECT_EQ(eocd64.cd_size, 47);
EXPECT_EQ(eocd64.cd_offset, 53);
}
TEST(ZipDetailsTest, ReadCentralHeaderZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_EQ(53, string_reader.pos());
ZipEntry central_header;
ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, central_header),
::tensorstore::IsOk());
EXPECT_EQ(central_header.version_madeby, 798);
EXPECT_EQ(central_header.flags, 0);
EXPECT_EQ(central_header.compression_method, ZipCompression::kStore);
EXPECT_EQ(central_header.crc, 3723141383);
EXPECT_EQ(central_header.compressed_size, 2);
EXPECT_EQ(central_header.uncompressed_size, 2);
EXPECT_EQ(central_header.internal_fa, 1);
EXPECT_EQ(central_header.external_fa, 293601280);
EXPECT_EQ(central_header.local_header_offset, 0);
EXPECT_EQ(central_header.filename, "-");
EXPECT_EQ(central_header.comment, "");
EXPECT_GT(central_header.mtime, absl::UnixEpoch());
}
TEST(ZipDetailsTest, ReadLocalHeaderZip64) {
riegeli::StringReader string_reader(
reinterpret_cast<const char*>(kZip64OneEmptyFile),
sizeof(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kLocalHeaderLiteral)));
ZipEntry local_header;
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.version_madeby, 0);
EXPECT_EQ(local_header.flags, 0);
EXPECT_EQ(local_header.compression_method, ZipCompression::kStore);
EXPECT_EQ(local_header.crc, 3723141383);
EXPECT_EQ(local_header.compressed_size, 2);
EXPECT_EQ(local_header.uncompressed_size, 2);
EXPECT_EQ(local_header.internal_fa, 0);
EXPECT_EQ(local_header.external_fa, 0);
EXPECT_EQ(local_header.local_header_offset, 0);
EXPECT_EQ(local_header.filename, "-");
EXPECT_EQ(local_header.comment, "");
EXPECT_GT(local_header.mtime, absl::UnixEpoch());
}
TEST(ZipDetailsTest, Decode) {
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
ZipEOCD eocd;
ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
EXPECT_EQ(eocd.num_entries, 3);
EXPECT_EQ(eocd.cd_size, 202);
EXPECT_EQ(eocd.cd_offset, 188);
string_reader.Seek(eocd.cd_offset);
std::vector<ZipEntry> central_headers;
for (size_t i = 0; i < eocd.num_entries; ++i) {
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kCentralHeaderLiteral)))
<< i;
ZipEntry header;
ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, header),
::tensorstore::IsOk());
central_headers.push_back(std::move(header));
}
std::vector<ZipEntry> local_headers;
for (const auto& header : central_headers) {
ZipEntry local_header;
string_reader.Seek(header.local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
local_headers.push_back(std::move(local_header));
absl::Cord data;
string_reader.Read(local_headers.back().compressed_size, data);
}
ASSERT_THAT(local_headers.size(), 3);
for (size_t i = 0; i < local_headers.size(); ++i) {
EXPECT_EQ(local_headers[i].flags, central_headers[i].flags);
EXPECT_EQ(local_headers[i].compression_method,
central_headers[i].compression_method);
EXPECT_EQ(local_headers[i].crc, central_headers[i].crc);
EXPECT_EQ(local_headers[i].compressed_size,
central_headers[i].compressed_size);
EXPECT_EQ(local_headers[i].uncompressed_size,
central_headers[i].uncompressed_size);
EXPECT_EQ(local_headers[i].filename, central_headers[i].filename);
}
}
// In-memory table of contents for a ZIP archive: the end-of-central-
// directory record plus one entry per archived file.
struct ZipDirectory {
  ZipEOCD eocd;                   // end-of-central-directory record
  std::vector<ZipEntry> entries;  // central-directory entries, in order read
};
// Locates and parses the ZIP central directory from `reader`, filling
// `directory`.  First tries the streaming EOCD search from the current
// position; if that reports more data is needed (an int64_t response),
// rewinds and retries a scan from the initial position.
absl::Status ReadDirectory(riegeli::Reader& reader, ZipDirectory& directory) {
  int64_t initial_pos = reader.pos();
  auto response =
      tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, -1);
  if (std::holds_alternative<int64_t>(response)) {
    // EOCD not found on the first attempt; rewind and retry.
    reader.Seek(initial_pos);
    response =
        tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, 0);
  }
  if (auto* status = std::get_if<absl::Status>(&response);
      status != nullptr && !status->ok()) {
    return std::move(*status);
  }
  if (std::holds_alternative<int64_t>(response)) {
    return absl::InternalError("ZIP incomplete");
  }
  // Parse each central-directory entry in sequence.
  // (A previously-declared `central_headers` vector was unused; entries go
  // directly into `directory.entries`.)
  reader.Seek(directory.eocd.cd_offset);
  for (size_t i = 0; i < directory.eocd.num_entries; ++i) {
    ZipEntry header{};
    if (auto entry_status = ReadCentralDirectoryEntry(reader, header);
        !entry_status.ok()) {
      return entry_status;
    }
    directory.entries.push_back(std::move(header));
  }
  return absl::OkStatus();
}
TEST(ZipDetailsTest, ReadDirectory) {
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2));
ZipDirectory dir;
EXPECT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
std::vector<ZipEntry> local_headers;
for (const auto& header : dir.entries) {
ZipEntry local_header;
string_reader.Seek(header.local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
local_headers.push_back(std::move(local_header));
}
EXPECT_THAT(local_headers.size(), 3);
for (size_t i = 0; i < local_headers.size(); ++i) {
EXPECT_EQ(local_headers[i].flags, dir.entries[i].flags);
EXPECT_EQ(local_headers[i].compression_method,
dir.entries[i].compression_method);
EXPECT_EQ(local_headers[i].crc, dir.entries[i].crc);
EXPECT_EQ(local_headers[i].compressed_size, dir.entries[i].compressed_size);
EXPECT_EQ(local_headers[i].uncompressed_size,
dir.entries[i].uncompressed_size);
EXPECT_EQ(local_headers[i].filename, dir.entries[i].filename);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_headers[0]));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data, "test\n");
EXPECT_EQ(data.size(), local_headers[0].uncompressed_size);
}
TEST(ZipDetailsTest, Xz) {
static constexpr unsigned char kXZ[] = {
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a,
0x36, 0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0xfd, 0x37, 0x7a,
0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41, 0x02, 0x00, 0x21,
0x01, 0x00, 0x00, 0x00, 0x00, 0x37, 0x27, 0x97, 0xd6, 0xe0, 0x00, 0x3f,
0x00, 0x11, 0x5e, 0x00, 0x30, 0xec, 0xbd, 0xa0, 0xa3, 0x19, 0xd7, 0x9c,
0xf2, 0xec, 0x93, 0x6b, 0xfe, 0x81, 0xb3, 0x7a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x25, 0x40, 0x5c, 0x24, 0xa9, 0xbe, 0x06, 0x72, 0x9e,
0x7a, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a, 0x50, 0x4b, 0x01,
0x02, 0x14, 0x00, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a, 0x36,
0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d,
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b,
0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00,
0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kXZ),
sizeof(kXZ));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kXZ);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc"
"cc\r\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Zstd) {
static constexpr unsigned char kZStd[] = {
0x50, 0x4b, 0x03, 0x04, 0x3f, 0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69,
0xf2, 0x50, 0x28, 0xe2, 0xde, 0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x28, 0xb5, 0x2f,
0xfd, 0x20, 0x40, 0xbd, 0x00, 0x00, 0x68, 0x61, 0x61, 0x0d, 0x0a, 0x62,
0x0d, 0x0a, 0x61, 0x0d, 0x0a, 0x63, 0x0d, 0x0a, 0x04, 0x10, 0x00, 0xc7,
0x38, 0xc6, 0x31, 0x38, 0x2c, 0x50, 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x3f,
0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69, 0xf2, 0x50, 0x28, 0xe2, 0xde,
0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65,
0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x4d, 0x00,
0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kZStd));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kZStd);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc"
"cc\r\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Bzip2) {
static constexpr unsigned char kBzip2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74,
0x45, 0x3c, 0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x42, 0x5a, 0x68,
0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x03, 0x64, 0xc8, 0x04, 0x00,
0x00, 0x07, 0x41, 0x00, 0x00, 0x10, 0x38, 0x00, 0x20, 0x00, 0x30, 0xcd,
0x34, 0x12, 0x6a, 0x7a, 0x95, 0x10, 0x26, 0x4e, 0xcd, 0x9f, 0x17, 0x72,
0x45, 0x38, 0x50, 0x90, 0x03, 0x64, 0xc8, 0x04, 0x50, 0x4b, 0x01, 0x02,
0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74, 0x45, 0x3c,
0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfd, 0x81, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05,
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00,
0x00, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kBzip2));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kBzip2);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\nbbbbbbbbbbbbbb\naaaaaaaaaaaaaa\ncccccccccccccc\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Deflate) {
static constexpr unsigned char kDeflate[] = {
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56, 0x5e,
0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00,
0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72, 0x73, 0x74, 0x73,
0x65, 0x63, 0x6f, 0x6e, 0x64, 0x4b, 0xcb, 0x2c, 0x2a, 0x2e, 0x29, 0x48,
0x2c, 0x2a, 0x29, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0x01, 0xb1, 0x00, 0x50,
0x4b, 0x01, 0x02, 0x1e, 0x03, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56,
0x5e, 0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13,
0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72,
0x73, 0x74, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x50, 0x4b, 0x05, 0x06,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00, 0x00, 0x00,
0x3b, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kDeflate));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kDeflate);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data, "firstpartsecondpart");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(TestdataTest, HeaderPositions) {
riegeli::CordReader reader(GetTestZipFileData());
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x19FA6);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x33F4D);
reader.Seek(0);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DEF3);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DF43);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DF94);
reader.Seek(0);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral)));
EXPECT_THAT(reader.pos(), 0x4DFE4);
}
TEST(TestdataTest, LocalHeaderEntry) {
riegeli::CordReader reader(GetTestZipFileData());
ZipEntry header;
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0);
ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk());
EXPECT_THAT(header.version_madeby, 0);
EXPECT_THAT(header.flags, 0x2);
EXPECT_THAT(header.compression_method, ZipCompression::kDeflate);
EXPECT_THAT(header.crc, 0x94EE1E3E);
EXPECT_THAT(header.compressed_size, 0x00019F62);
EXPECT_THAT(header.uncompressed_size, 0x00019F6F);
EXPECT_THAT(header.internal_fa, 0);
EXPECT_THAT(header.external_fa, 0);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.end_of_header_offset, 68);
EXPECT_THAT(header.filename, "data/a.png");
EXPECT_THAT(header.comment, "");
EXPECT_THAT(header.is_zip64, false);
}
TEST(TestdataTest, CentralHeaderEntry) {
riegeli::CordReader reader(GetTestZipFileData());
reader.Seek(0x4DEF3);
ASSERT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DEF3);
ZipEntry header{};
ASSERT_THAT(ReadCentralDirectoryEntry(reader, header), ::tensorstore::IsOk());
EXPECT_THAT(header.flags, 0x2);
EXPECT_THAT(header.compression_method, ZipCompression::kDeflate);
EXPECT_THAT(header.crc, 0x94EE1E3E);
EXPECT_THAT(header.compressed_size, 0x00019F62);
EXPECT_THAT(header.uncompressed_size, 0x00019F6F);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.end_of_header_offset, 24);
EXPECT_THAT(header.filename, "data/a.png");
EXPECT_THAT(header.comment, "");
EXPECT_THAT(header.is_zip64, false);
EXPECT_THAT(header.version_madeby, 0x031E);
EXPECT_THAT(header.internal_fa, 0);
EXPECT_THAT(header.external_fa, 0x81240001);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.estimated_read_size, 106415);
}
TEST(TestdataTest, EOCD) {
riegeli::CordReader reader(GetTestZipFileData());
ASSERT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral)));
EXPECT_THAT(reader.pos(), 0x4DFE4);
::tensorstore::internal_zip::ZipEOCD eocd{};
ASSERT_THAT(ReadEOCD(reader, eocd), ::tensorstore::IsOk());
EXPECT_THAT(eocd.num_entries, 3);
EXPECT_THAT(eocd.cd_size, 0x000000F1);
EXPECT_THAT(eocd.cd_offset, 0x0004DEF3);
EXPECT_THAT(eocd.comment, "");
}
// Decompresses the first local entry of the test archive and checks the
// decoded length against the header's uncompressed size.
TEST(TestdataTest, FileData) {
  riegeli::CordReader reader(GetTestZipFileData());
  ZipEntry header;
  ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk());
  EXPECT_THAT(reader.pos(), 0x0044);  // payload begins right after header
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto entry_reader, tensorstore::internal_zip::GetReader(&reader, header));
  std::string data;
  EXPECT_THAT(riegeli::ReadAll(*entry_reader, data), ::tensorstore::IsOk());
  EXPECT_EQ(data.size(), header.uncompressed_size);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zip_details.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zip_details_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
31510ab2-ecd4-4755-b6bd-8f4bd89138d6 | cpp | google/tensorstore | raw_bytes_hex | tensorstore/internal/json_binding/raw_bytes_hex.cc | tensorstore/internal/json_binding/raw_bytes_hex_test.cc | #include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
namespace tensorstore {
namespace internal_json_binding {
namespace {
// Returns true if every character of `s` is an ASCII hexadecimal digit
// (0-9, a-f, A-F).  An empty string is trivially hexadecimal.
bool IsHexString(std::string_view s) {
  auto is_hex_digit = [](char ch) {
    return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') ||
           (ch >= 'A' && ch <= 'F');
  };
  for (char ch : s) {
    if (!is_hex_digit(ch)) return false;
  }
  return true;
}
}
namespace raw_bytes_hex_binder {
// Loading direction: parses a JSON string of exactly 2*num_bytes hex digits
// and writes the decoded bytes into the `num_bytes`-sized buffer at `obj`.
// Returns InvalidArgumentError for non-strings, wrong length, or non-hex
// characters.
absl::Status RawBytesHexImpl::operator()(std::true_type is_loading, NoOptions,
                                         void* obj, ::nlohmann::json* j) const {
  auto* s = j->get_ptr<const std::string*>();
  if (!s || s->size() != 2 * num_bytes ||
      !internal_json_binding::IsHexString(*s)) {
    return absl::InvalidArgumentError(
        absl::StrFormat("Expected string with %d hex digits, but received: %s",
                        num_bytes * 2, j->dump()));
  }
  // Length and character set were validated above, so decoding cannot fail.
  std::string temp = absl::HexStringToBytes(*s);
  assert(temp.size() == num_bytes);
  std::memcpy(obj, temp.data(), num_bytes);
  return absl::OkStatus();
}
// Saving direction: encodes the `num_bytes` bytes at `obj` as a lowercase
// hex JSON string.
absl::Status RawBytesHexImpl::operator()(std::false_type is_loading, NoOptions,
                                         const void* obj,
                                         ::nlohmann::json* j) const {
  *j = absl::BytesToHexString(
      absl::string_view(reinterpret_cast<const char*>(obj), num_bytes));
  return absl::OkStatus();
}
}
}
} | #include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <string>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
// Round-trips a 3-byte array through the hex binder; uppercase input is
// accepted on load but serializes back as lowercase (hence "inexact").
TEST(RawBytesHexTest, RoundTrip) {
  tensorstore::TestJsonBinderRoundTrip<std::array<unsigned char, 3>>(
      {
          {{{1, 2, 0xab}}, "0102ab"},
      },
      jb::RawBytesHex);
  tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<
      std::array<unsigned char, 3>>(
      {
          {"0102AB", "0102ab"},
      },
      jb::RawBytesHex);
}
// Non-string JSON values and strings containing non-hex characters must be
// rejected with InvalidArgumentError.
TEST(RawBytesHexTest, Invalid) {
  tensorstore::TestJsonBinderFromJson<std::array<unsigned char, 3>>(
      {
          {1,
           MatchesStatus(absl::StatusCode::kInvalidArgument,
                         "Expected string with 6 hex digits, but received: 1")},
          {"0102zb", MatchesStatus(absl::StatusCode::kInvalidArgument,
                                   "Expected string with 6 hex "
                                   "digits, but received: \"0102zb\"")},
      },
      jb::RawBytesHex);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/raw_bytes_hex.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/raw_bytes_hex_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ca4f7ed1-b2c4-4390-98ec-f748a7ec5ee4 | cpp | google/tensorstore | staleness_bound | tensorstore/internal/json_binding/staleness_bound.cc | tensorstore/internal/json_binding/staleness_bound_test.cc | #include "tensorstore/internal/json_binding/staleness_bound.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal {
// JSON binder for StalenessBound.  Representations:
//   true   <-> infinite future (obj->time == absl::InfiniteFuture())
//   false  <-> infinite past  (obj->time == absl::InfinitePast())
//   "open" <-> bounded by open time (obj->bounded_by_open_time == true)
//   number <-> absolute time, in seconds since the Unix epoch
TENSORSTORE_DEFINE_JSON_BINDER(
    StalenessBoundJsonBinder,
    [](auto is_loading, const auto& options, auto* obj,
       ::nlohmann::json* j) -> absl::Status {
      if constexpr (is_loading) {
        if (const auto* b = j->get_ptr<const bool*>()) {
          *obj = *b ? absl::InfiniteFuture() : absl::InfinitePast();
        } else if (j->is_number()) {
          // Accepts any JSON number (int, unsigned, or float).
          const double t = static_cast<double>(*j);
          *obj = absl::UnixEpoch() + absl::Seconds(t);
        } else if (*j == "open") {
          obj->time = absl::InfiniteFuture();
          obj->bounded_by_open_time = true;
        } else {
          return internal_json::ExpectedError(*j,
                                              "boolean, number, or \"open\"");
        }
      } else {
        // Saving: `bounded_by_open_time` takes precedence over the stored
        // time value.
        if (obj->bounded_by_open_time) {
          *j = "open";
        } else {
          const absl::Time& t = obj->time;
          if (t == absl::InfiniteFuture()) {
            *j = true;
          } else if (t == absl::InfinitePast()) {
            *j = false;
          } else {
            *j = absl::ToDoubleSeconds(t - absl::UnixEpoch());
          }
        }
      }
      return absl::OkStatus();
    })
}
} | #include "tensorstore/internal/json_binding/staleness_bound.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/staleness_bound.h"
using ::tensorstore::MatchesJson;
using ::tensorstore::StalenessBound;
using ::testing::Optional;
namespace {
TEST(StalenessBoundJsonBinderTest, RoundTrip) {
tensorstore::TestJsonBinderToJson<StalenessBound>({
{StalenessBound{absl::InfinitePast()}, Optional(MatchesJson(false))},
{StalenessBound{absl::InfiniteFuture()}, Optional(MatchesJson(true))},
{StalenessBound::BoundedByOpen(), Optional(MatchesJson("open"))},
{StalenessBound{absl::UnixEpoch()}, Optional(MatchesJson(0))},
{StalenessBound{absl::UnixEpoch() + absl::Seconds(1)},
Optional(MatchesJson(1))},
});
}
TEST(StalenessBoundJsonBinderTest, FromJson) {
tensorstore::TestJsonBinderFromJson<StalenessBound>({
{false,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::InfinitePast()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{true,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::InfiniteFuture()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{"open", ::testing::Optional(::testing::Field(
&StalenessBound::bounded_by_open_time, true))},
{0, ::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::UnixEpoch()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1, ::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Seconds(1)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1u,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Seconds(1)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1.5,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Milliseconds(1500)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/staleness_bound.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/staleness_bound_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5d214a45-f9ca-4faa-9496-7df736893bdf | cpp | google/tensorstore | std_variant | tensorstore/internal/json_binding/std_variant.cc | tensorstore/internal/json_binding/std_variant_test.cc | #include <stddef.h>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_json_binding {
// Combines the per-alternative binder failures into a single
// InvalidArgumentError of the form
// "No matching value binder: <err0>; <err1>; ...".
absl::Status GetVariantErrorStatus(span<const absl::Status> status_values) {
  std::string message = "No matching value binder: ";
  bool first = true;
  for (const absl::Status& status : status_values) {
    if (!first) message += "; ";
    first = false;
    message += status.message();
  }
  return absl::InvalidArgumentError(message);
}
}
} | #include "tensorstore/internal/json_binding/std_variant.h"
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = ::tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(JsonBindingTest, VariantDefaultBinder) {
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>({
{3, ::nlohmann::json(3)},
{"abc", ::nlohmann::json("abc")},
});
}
TEST(JsonBindingTest, VariantDefaultBinderError) {
EXPECT_THAT(
(jb::FromJson<std::variant<int, std::string>>(::nlohmann::json(false))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"No matching value binder: "
"Expected integer in the range .*, but received: false; "
"Expected string, but received: false"));
}
TEST(JsonBindingTest, VariantExplicitBinder) {
auto binder = jb::Object(jb::Variant(jb::Member("a"), jb::Member("b")));
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>(
{
{3, {{"a", 3}}},
{"abc", {{"b", "abc"}}},
},
binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_variant.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_variant_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
09263936-949c-419b-9c6c-b6f3f0944461 | cpp | google/tensorstore | array_endian_codec | tensorstore/internal/riegeli/array_endian_codec.cc | tensorstore/internal/riegeli/array_endian_codec_test.cc | #include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <string_view>
#include <utility>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/copy_all.h"
#include "riegeli/bytes/limiting_reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal {
namespace {
// Incremented by DecodeArrayEndian when the decoded array aliases the
// reader's buffer directly (the zero-copy path).
auto& contiguous_bytes = internal_metrics::Counter<int64_t>::New(
    "/tensorstore/internal/riegeli/contiguous_bytes",
    MetricMetadata("Endian codec bytes from contiguous buffers",
                   internal_metrics::Units::kBytes));
// Incremented by DecodeArrayEndian when the data had to be copied into a
// freshly-allocated array (element-wise decode path).
auto& noncontiguous_bytes = internal_metrics::Counter<int64_t>::New(
    "/tensorstore/internal/riegeli/noncontiguous_bytes",
    MetricMetadata("Endian codec bytes from non-contiguous buffers",
                   internal_metrics::Units::kBytes));
}  // namespace
// Writes the elements of `decoded` to `writer` in `encoded_endian` byte
// order, visiting elements in `order`.  Returns false (with `writer` in a
// failed state) on error.
[[nodiscard]] bool EncodeArrayEndian(SharedArrayView<const void> decoded,
                                     endian encoded_endian,
                                     ContiguousLayoutOrder order,
                                     riegeli::Writer& writer) {
  // Per-dtype unaligned copy/swap functions, indexed by dtype id.
  const auto& functions =
      kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
  assert(functions.copy != nullptr);  // fail on non-trivial types
  if ((encoded_endian == endian::native ||
       functions.swap_endian_inplace == nullptr) &&
      IsContiguousLayout(decoded, order)) {
    // Fast path: no byte swap needed (native order requested, or the dtype
    // is endian-invariant) and the array is already contiguous in the
    // requested order, so the raw bytes can be written as-is.
    const size_t length = decoded.num_elements() * decoded.dtype().size();
    if (writer.PrefersCopying()) {
      return writer.Write(std::string_view(
          reinterpret_cast<const char*>(decoded.data()), length));
    }
    // Zero-copy: hand the writer a Cord that shares ownership of the array
    // buffer.
    return writer.Write(
        internal::MakeCordFromSharedPtr(std::move(decoded.pointer()), length));
  }
  // Slow path: element-wise write, swapping bytes when a non-native
  // encoding was requested.
  const internal::ElementwiseFunction<1, void*>* write_func =
      encoded_endian == endian::native ? &functions.write_native_endian
                                       : &functions.write_swapped_endian;
  return internal::IterateOverArrays(
      {write_func, &writer},
      /*status=*/nullptr, {order, include_repeated_elements}, decoded);
}
namespace {
// A riegeli::Writer that does not actually write anywhere: it accepts
// exactly one Chain/Cord write holding a single flat, suitably-aligned
// buffer, and captures shared ownership of that buffer in `data`.  Any
// other usage pattern fails with kUnimplemented, which signals the caller
// (DecodeArrayEndian) to fall back to the copying path.
class ContiguousBufferSinkWriter : public riegeli::Writer {
 public:
  // On success, shares ownership of the single flat buffer received.
  std::shared_ptr<const void> data;
  // Exact byte length the buffer must have.
  size_t expected_length;
  // Required alignment of the buffer start (the dtype's alignment).
  size_t expected_alignment;

  void DoFail() { Fail(absl::UnimplementedError("")); }

  // Plain byte pushes cannot be captured zero-copy; fail immediately.
  bool PushSlow(size_t min_length, size_t recommended_length) override {
    DoFail();
    return false;
  }

  // Checks that `buf` has exactly the expected length and alignment.
  bool ValidateContiguousBuffer(std::string_view buf) {
    if (buf.size() != expected_length ||
        (reinterpret_cast<uintptr_t>(buf.data()) % expected_alignment) != 0) {
      DoFail();
      return false;
    }
    return true;
  }

  // Common implementation for Chain and Cord: requires a single write of a
  // flat buffer, then retains the source object so the buffer stays alive.
  template <typename T>
  bool WriteCordLike(T&& src) {
    if (this->data) {
      // A buffer was already captured; a second write means the input is
      // not one contiguous block.
      DoFail();
      return false;
    }
    auto buf = src.TryFlat();
    if (!buf) {
      DoFail();
      return false;
    }
    if (!ValidateContiguousBuffer(*buf)) return false;
    // Move the source into a heap object to pin the buffer, then re-flatten
    // and re-validate: the flat view may differ after the move.
    auto data = std::make_shared<absl::remove_cvref_t<T>>(std::forward<T>(src));
    buf = data->TryFlat();
    if (!buf) {
      DoFail();
      return false;
    }
    if (!ValidateContiguousBuffer(*buf)) return false;
    // Aliasing constructor: `data` keeps the container alive while the
    // stored pointer refers to its flat byte buffer.
    this->data = std::shared_ptr<const void>(std::move(data), buf->data());
    return true;
  }

  bool WriteSlow(const riegeli::Chain& src) override {
    return WriteCordLike(src);
  }
  bool WriteSlow(const absl::Cord& src) override { return WriteCordLike(src); }
};
}  // namespace
// Reads an array of shape `decoded_shape` and type `dtype` from `reader`,
// converting from `encoded_endian` byte order, with elements laid out in
// `order`.  When possible, the returned array aliases the reader's buffer
// (zero copy); otherwise a freshly-allocated array is filled.  The reader
// must end exactly at the end of the array data.
Result<SharedArray<const void>> DecodeArrayEndian(
    riegeli::Reader& reader, DataType dtype, span<const Index> decoded_shape,
    endian encoded_endian, ContiguousLayoutOrder order) {
  const auto& functions =
      kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
  assert(functions.copy != nullptr);  // fail on non-trivial types
  size_t expected_length = dtype.size() * ProductOfExtents(decoded_shape);
  // The zero-copy path is only attempted when no byte swap is needed and the
  // remaining input is exactly `expected_length` bytes, so the whole array
  // could be a single flat buffer ending exactly at EOF.
  const auto may_be_contiguous = [&] {
    if (encoded_endian != endian::native &&
        functions.swap_endian_inplace != nullptr) {
      return false;
    }
    if (!reader.SupportsRewind()) {
      // Rewind is required in case the zero-copy attempt fails and the data
      // must be re-read via the copying path below.
      return false;
    }
    if (!reader.SupportsSize()) {
      return false;
    }
    auto size_opt = reader.Size();
    if (!size_opt) return false;
    if (*size_opt < expected_length ||
        *size_opt - expected_length != reader.pos()) {
      return false;
    }
    return true;
  };
  if (may_be_contiguous()) {
    // Attempt zero-copy: ContiguousBufferSinkWriter captures shared
    // ownership of one flat, aligned buffer if the reader can supply it,
    // and fails (kUnimplemented) otherwise.
    auto pos = reader.pos();
    ContiguousBufferSinkWriter buffer_sink_writer;
    buffer_sink_writer.expected_length = expected_length;
    buffer_sink_writer.expected_alignment = dtype->alignment;
    if (riegeli::CopyAll(reader, buffer_sink_writer, expected_length).ok()) {
      absl::Status status;
      // Validate raw bytes in place for dtypes with restricted
      // representations (e.g. bool).
      if (functions.validate) {
        if (!(*functions.validate)[IterationBufferKind::kContiguous](
                nullptr, {1, static_cast<Index>(expected_length)},
                IterationBufferPointer(
                    const_cast<void*>(buffer_sink_writer.data.get()), 0,
                    dtype.size()),
                &status)) {
          return status;
        }
      }
      contiguous_bytes.IncrementBy(expected_length);
      // The returned array shares ownership of the captured buffer.
      return tensorstore::SharedArray<const void>(
          SharedElementPointer<const void>(std::move(buffer_sink_writer.data),
                                           dtype),
          decoded_shape, order);
    }
    // Zero-copy failed; rewind and fall through to the copying path.
    if (!reader.Seek(pos)) {
      return reader.status();
    }
  }
  // Copying path: allocate a new array and decode element-wise.
  auto decoded =
      tensorstore::AllocateArray(decoded_shape, order, default_init, dtype);
  TENSORSTORE_RETURN_IF_ERROR(
      DecodeArrayEndian(reader, encoded_endian, order, decoded));
  reader.VerifyEnd();  // require that no trailing data remains
  if (!reader.ok()) {
    return reader.status();
  }
  noncontiguous_bytes.IncrementBy(expected_length);
  return decoded;
}
// Fills the existing array `decoded` from `reader`, converting from
// `encoded_endian` byte order and consuming exactly
// `decoded.dtype().size() * decoded.num_elements()` bytes.
absl::Status DecodeArrayEndian(riegeli::Reader& reader, endian encoded_endian,
                               ContiguousLayoutOrder order,
                               ArrayView<void> decoded) {
  const auto& functions =
      kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
  assert(functions.copy != nullptr);  // fail on non-trivial types
  // Limit the read to exactly the array's byte length so that both a short
  // read and unread trailing bytes are reported as errors.
  riegeli::LimitingReader limiting_reader(
      &reader, riegeli::LimitingReaderBase::Options().set_exact_length(
                   decoded.dtype().size() * decoded.num_elements()));
  // Any failure is detected below via VerifyEndAndClose(), so the iteration
  // result itself can be discarded.
  [[maybe_unused]] const auto unused_result = internal::IterateOverArrays(
      {encoded_endian == endian::native ? &functions.read_native_endian
                                        : &functions.read_swapped_endian,
       &limiting_reader},
      /*status=*/nullptr, {order, include_repeated_elements}, decoded);
  if (!limiting_reader.VerifyEndAndClose()) {
    return limiting_reader.status();
  }
  return absl::OkStatus();
}
}
} | #include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using tensorstore::AllocateArray;
using tensorstore::c_order;
using tensorstore::ContiguousLayoutOrder;
using tensorstore::DataType;
using tensorstore::dtype_v;
using tensorstore::endian;
using tensorstore::fortran_order;
using tensorstore::Index;
using tensorstore::IsContiguousLayout;
using tensorstore::MatchesStatus;
using tensorstore::Result;
using tensorstore::SharedArray;
using tensorstore::span;
using tensorstore::internal::DecodeArrayEndian;
using tensorstore::internal::EncodeArrayEndian;
using tensorstore::internal::FlatCordBuilder;
// Test helper: encodes `array` into an absl::Cord via EncodeArrayEndian,
// returning the writer's status on failure.
Result<absl::Cord> EncodeArrayAsCord(SharedArray<const void> array,
                                     endian endianness,
                                     ContiguousLayoutOrder order) {
  absl::Cord encoded;
  riegeli::CordWriter writer{&encoded};
  if (EncodeArrayEndian(array, endianness, order, writer) && writer.Close()) {
    return encoded;
  }
  return writer.status();
}
// Test helper: decodes an array of the given dtype/shape from `encoded` via
// DecodeArrayEndian.
Result<SharedArray<const void>> DecodeArrayFromCord(
    DataType dtype, span<const Index> decoded_shape, absl::Cord encoded,
    endian endianness, ContiguousLayoutOrder order) {
  riegeli::CordReader reader{&encoded};
  return DecodeArrayEndian(reader, dtype, decoded_shape, endianness, order);
}
// Test helper: allocates an `a` x `b` array in `order` with element (i, j)
// set to static_cast<T>(i * b + j), so elements have predictable values.
template <typename T = uint32_t>
SharedArray<const void> MakeTestArray(ContiguousLayoutOrder order = c_order,
                                      Index a = 1000, Index b = 2000) {
  auto c_array = AllocateArray<T>({a, b}, order, tensorstore::default_init);
  for (Index a_i = 0; a_i < a; ++a_i) {
    for (Index b_i = 0; b_i < b; ++b_i) {
      c_array(a_i, b_i) = static_cast<T>(a_i * b + b_i);
    }
  }
  return c_array;
}
TEST(EncodeArrayEndianTest, ContiguousLayout) {
auto c_array = MakeTestArray();
auto f_array = tensorstore::MakeCopy(c_array, fortran_order);
Index num_elements = c_array.num_elements();
ASSERT_TRUE(IsContiguousLayout(c_array, c_order));
ASSERT_TRUE(IsContiguousLayout(f_array, fortran_order));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord c_encoded,
EncodeArrayAsCord(c_array, endian::native, c_order));
{
auto flat = c_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(c_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord f_encoded,
EncodeArrayAsCord(f_array, endian::native, fortran_order));
{
auto flat = f_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(f_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(c_array, endian::native, fortran_order));
EXPECT_EQ(f_encoded, encoded);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(f_array, endian::native, c_order));
EXPECT_EQ(c_encoded, encoded);
}
}
// Test helper: encodes `array` to a Cord and decodes it back with the same
// endianness and layout order.
Result<SharedArray<const void>> RoundTripArrayViaCord(
    SharedArray<const void> array, endian endianness,
    ContiguousLayoutOrder order) {
  TENSORSTORE_ASSIGN_OR_RETURN(auto encoded,
                               EncodeArrayAsCord(array, endianness, order));
  return DecodeArrayFromCord(array.dtype(), array.shape(), encoded, endianness,
                             order);
}
// Verifies that a native-endian round trip of a contiguous array is
// zero-copy: the decoded array aliases the original array's buffer.
template <typename T = uint16_t>
void TestRoundTripNoCopy(ContiguousLayoutOrder order) {
  auto orig_array = MakeTestArray<T>(order);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto decoded, RoundTripArrayViaCord(orig_array, endian::native, order));
  ASSERT_EQ(orig_array.data(), decoded.data());
}
// Verifies that a round trip of a small (2x3) array with the given
// endianness preserves all element values.
template <typename T = uint16_t>
void TestRoundTripCopy(ContiguousLayoutOrder order, endian endianness) {
  auto orig_array = MakeTestArray<T>(order, 2, 3);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto decoded, RoundTripArrayViaCord(orig_array, endianness, order));
  ASSERT_TRUE(tensorstore::AreArraysIdenticallyEqual(orig_array, decoded))
      << "orig_array=" << orig_array << ", decoded=" << decoded;
}
TEST(EncodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded, EncodeArrayAsCord(orig_array, endian::big, c_order));
EXPECT_THAT(encoded.Flatten(), ::testing::ElementsAreArray({
0,
0,
0,
1,
0,
2,
0,
3,
0,
4,
0,
5,
}));
}
TEST(DecodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
std::string encoded{
0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5,
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayFromCord(orig_array.dtype(), orig_array.shape(),
absl::Cord(encoded), endian::big, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrder) {
TestRoundTripNoCopy(c_order);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrderBool) {
TestRoundTripNoCopy<bool>(c_order);
}
TEST(DecodeArrayEndianTest, InvalidBool) {
std::string encoded{0, 1, 2, 1};
EXPECT_THAT(DecodeArrayFromCord(dtype_v<bool>, {{2, 2}}, absl::Cord(encoded),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2; at byte 2"));
}
TEST(DecodeArrayEndianTest, InvalidBoolNoCopy) {
std::string encoded;
FlatCordBuilder builder(1000 * 2000);
std::fill_n(builder.data(), builder.size(), 0);
builder.data()[builder.size() - 1] = 2;
EXPECT_THAT(
DecodeArrayFromCord(dtype_v<bool>, {{1000, 2000}},
std::move(builder).Build(), endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2"));
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyFOrder) {
TestRoundTripNoCopy(fortran_order);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderBig) {
TestRoundTripCopy(c_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderLittle) {
TestRoundTripCopy(c_order, endian::little);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderBig) {
TestRoundTripCopy(fortran_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderLittle) {
TestRoundTripCopy(fortran_order, endian::little);
}
TEST(DecodeArrayEndianTest, StringReader) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5,
};
riegeli::StringReader reader{encoded};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(DecodeArrayEndianTest, LengthTooShort) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Not enough data.*"));
}
TEST(DecodeArrayEndianTest, LengthTooLong) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5, 6,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"End of data expected.*"));
}
TEST(EncodeArrayEndianTest, Zlib) {
auto orig_array = MakeTestArray<uint16_t>(c_order);
absl::Cord encoded;
{
riegeli::ZlibWriter writer{riegeli::CordWriter{&encoded}};
ASSERT_TRUE(EncodeArrayEndian(orig_array, endian::native, c_order, writer));
ASSERT_TRUE(writer.Close());
}
{
riegeli::ZlibReader reader{riegeli::CordReader{encoded}};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(),
orig_array.shape(), endian::native, c_order),
::testing::Optional(orig_array));
}
}
TEST(DecodeArrayEndianTest, Misaligned) {
int a = 1000, b = 2000;
int num_elements = a * b;
size_t buffer_size = 1000 * 2000 * 2 + 1;
std::unique_ptr<char[]> source(new char[1000 * 2000 * 2 + 1]);
for (int i = 0; i < num_elements; ++i) {
uint16_t x = static_cast<uint16_t>(i);
memcpy(&source[i * 2 + 1], &x, 2);
}
auto cord = absl::MakeCordFromExternal(
std::string_view(source.get() + 1, buffer_size - 1), [] {});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
ASSERT_NE(decoded.data(), &source[1]);
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
TEST(DecodeArrayEndianTest, Fragmented) {
auto c_array = MakeTestArray<uint16_t>();
size_t total_bytes = c_array.num_elements() * c_array.dtype().size();
std::vector<absl::Cord> parts{
absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(c_array.data()),
total_bytes / 2),
[] {}),
absl::MakeCordFromExternal(
std::string_view(
reinterpret_cast<const char*>(c_array.data()) + total_bytes / 2,
total_bytes / 2),
[] {})};
absl::Cord cord = absl::MakeFragmentedCord(parts);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/array_endian_codec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/array_endian_codec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8a5a62c4-9b13-49de-ba2d-2ae94814a8b0 | cpp | google/tensorstore | find | tensorstore/internal/riegeli/find.cc | tensorstore/internal/riegeli/find_test.cc | #include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstring>
#include <optional>
#include <string_view>
#include "riegeli/bytes/reader.h"
namespace tensorstore {
namespace internal {
// Returns true if the bytes at the reader's current position begin with
// `needle`.  Does not advance the reader's position.
bool StartsWith(riegeli::Reader &reader, std::string_view needle) {
  if (!reader.ok()) return false;
  if (!reader.Pull(needle.size())) return false;
  return memcmp(reader.cursor(), needle.data(), needle.size()) == 0;
}
// Advances `reader` to the first occurrence of `needle` at or after the
// current position.  Returns true with the reader positioned at the start of
// the match, or false (reader at end of input) if there is no match.
bool FindFirst(riegeli::Reader &reader, std::string_view needle) {
  while (true) {
    // Ensure at least needle.size() bytes are buffered; fails at EOF.
    if (!reader.Pull(needle.size())) break;
    // Search everything currently buffered.
    auto end = reader.cursor() + reader.available();
    auto pos = std::search(reader.cursor(), end, needle.begin(), needle.end());
    if (pos != end) {
      reader.move_cursor(pos - reader.cursor());
      return true;
    }
    // No match in this buffer: advance, but keep the trailing
    // needle.size() - 1 bytes in case a match straddles the buffer boundary.
    reader.move_cursor(1 + reader.available() - needle.size());
  }
  return false;
}
// Positions `reader` at the last occurrence of `needle` at or after the
// current position.  Returns true on success; on failure the reader's final
// position is unspecified.
bool FindLast(riegeli::Reader &reader, std::string_view needle) {
  if (reader.SupportsSize()) {
    // Fast path: pull the entire remaining input into one flat buffer and
    // search backwards with rfind.
    auto size = reader.Size();
    if (size && reader.Pull(*size)) {
      auto found_pos = std::string_view(reader.cursor(), *size).rfind(needle);
      if (found_pos == std::string_view::npos) return false;
      return reader.Seek(found_pos + reader.pos());
    }
  }
  // Streaming path: scan forward remembering the position of the most
  // recent match, then seek back to it.
  std::optional<uint64_t> found;
  while (reader.ok()) {
    // NOTE(review): the loop condition is `available > needle.size()`
    // (strict), so a buffer holding exactly needle.size() bytes is not
    // compared here and relies on the Pull() below to extend it — confirm a
    // match in the final, exactly needle-sized buffer is not missed.
    for (size_t available = reader.available(); available > needle.size();
         available = reader.available()) {
      if (memcmp(reader.cursor(), needle.data(), needle.size()) == 0) {
        found = reader.pos();
      }
      // Jump directly to the next byte matching needle[0].
      const char *pos = static_cast<const char *>(
          memchr(reader.cursor() + 1, needle[0], available - 1));
      if (pos == nullptr) {
        reader.move_cursor(available);
        break;
      }
      reader.move_cursor(pos - reader.cursor());
    }
    // Refill so at least needle.size() bytes are buffered; fails at EOF.
    if (!reader.Pull(needle.size() - reader.available())) break;
  }
  return found.has_value() && reader.Seek(*found);
}
}
} | #include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "riegeli/bytes/string_reader.h"
namespace {
using ::tensorstore::internal::FindFirst;
using ::tensorstore::internal::FindLast;
using ::tensorstore::internal::StartsWith;
static constexpr unsigned char kData[] = {
0x17, 0x16, 0xa1, 0xcb, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
0x03, 0x04, 0xbb, 0xcc, 0xc7, 0xb6, 0xbe, 0x5d, 0x7c, 0x2d, 0x23, 0x44,
0xa0, 0xbe, 0x13, 0x1b, 0x9a, 0x2d, 0xf2, 0x13, 0x6a, 0xfb, 0xad, 0xdb,
0x73, 0xf9, 0x3d, 0xbc, 0x5d, 0x7c, 0x6f, 0x41, 0xc0, 0xad, 0xf3, 0x31,
0x79, 0x7f, 0x89, 0xb2, 0xe4, 0xa9, 0xf5, 0x9d, 0xc0, 0x30, 0x23, 0x32,
0x99, 0x2c, 0x16, 0x42, 0xf5, 0x48, 0xd1, 0x79, 0xdb, 0x98, 0xb9, 0xc3,
0x6c, 0xa6, 0x50, 0xcd, 0x86, 0xb6, 0xd3, 0xa7, 0x57, 0x3b, 0xe6, 0x1d,
0xa5, 0xe2, 0x79, 0xe9, 0x2d, 0x19, 0xec, 0xa6, 0xf3, 0xa3, 0x50, 0x65,
0x03, 0x04, 0xbb, 0xcc, 0x1a, 0xc9, 0xec, 0xb2, 0xa6, 0x3e, 0xe0, 0x49,
0x6a, 0x30, 0xd7, 0x1f, 0x90, 0x08, 0x1c, 0x2a, 0x6b, 0xbd, 0x06, 0x9c,
0xef, 0xd2, 0x79, 0x20, 0x64, 0xbc, 0xb7, 0x75, 0xbb, 0xcd, 0xcc, 0xa8,
0x49, 0x8b, 0x30, 0x4f, 0x73, 0x7c, 0xb5, 0x6e, 0x08, 0x1b, 0xc2, 0x7f,
0xfb, 0xb1, 0xc4, 0x49, 0x89, 0x74, 0xe7, 0x8e, 0x9d, 0x6f, 0x44, 0x14,
0xbd, 0xdc, 0x6a, 0xd9, 0xcb, 0x53, 0x2b, 0xdc, 0x48, 0x6c, 0xa3, 0x14,
0x4e, 0xc0, 0x3b, 0x6b, 0x47, 0x50, 0xd5, 0x97, 0x84, 0x30, 0xd5, 0x28,
0x03, 0x04, 0xbb, 0xcc, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
};
constexpr const unsigned char kLiteral1[4] = {0x03, 0x04, 0xbb, 0xcc};
constexpr const unsigned char kLiteral2[3] = {0xff, 0xfe, 0xff};
TEST(FindTest, FindFirst) {
const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
sizeof(kLiteral1));
const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
sizeof(kLiteral2));
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
sizeof(kData));
size_t positions[3] = {0, 0, 0};
for (int i = 0; i < 3; ++i) {
EXPECT_TRUE(FindFirst(string_reader, literal1));
EXPECT_TRUE(StartsWith(string_reader, literal1));
positions[i] = string_reader.pos();
string_reader.Skip(sizeof(kLiteral1));
}
EXPECT_FALSE(FindFirst(string_reader, literal1));
EXPECT_THAT(positions, ::testing::ElementsAre(12, 96, 180));
string_reader.Seek(0);
EXPECT_TRUE(FindFirst(string_reader, literal2));
EXPECT_THAT(string_reader.pos(), 9);
}
TEST(FindTest, FindLast) {
const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
sizeof(kLiteral1));
const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
sizeof(kLiteral2));
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
sizeof(kData));
EXPECT_TRUE(FindLast(string_reader, literal1));
EXPECT_TRUE(StartsWith(string_reader, literal1));
EXPECT_THAT(string_reader.pos(), 180);
string_reader.Seek(0);
EXPECT_TRUE(FindLast(string_reader, literal2));
EXPECT_THAT(string_reader.pos(), 189);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/find.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/find_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f0985b7a-a24b-4d9e-9677-12ff0a6d68d6 | cpp | google/tensorstore | verbose_flag | tensorstore/internal/log/verbose_flag.cc | tensorstore/internal/log/verbose_flag_test.cc | #include "tensorstore/internal/log/verbose_flag.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/env.h"
ABSL_FLAG(std::string, tensorstore_verbose_logging, {},
"comma-separated list of tensorstore verbose logging flags")
.OnUpdate([]() {
if (!absl::GetFlag(FLAGS_tensorstore_verbose_logging).empty()) {
tensorstore::internal_log::UpdateVerboseLogging(
absl::GetFlag(FLAGS_tensorstore_verbose_logging), true);
}
});
namespace tensorstore {
namespace internal_log {
namespace {
ABSL_CONST_INIT absl::Mutex g_mutex(absl::kConstInit);
ABSL_CONST_INIT VerboseFlag* g_list_head ABSL_GUARDED_BY(g_mutex) = nullptr;
// Parsed verbose-logging configuration.
struct LoggingLevelConfig {
  // Level used for flags with no matching entry in `levels`; -1 disables.
  // Derived from the special entry named "all" when present.
  int default_level = -1;
  // Maps a flag name (or dotted prefix of one) to its verbosity level.
  absl::flat_hash_map<std::string, int> levels;
};
// Parses `input`, a comma-separated list of "name" / "name=level" entries,
// into `config.levels`, then recomputes `config.default_level` from the
// special entry named "all".  A bare "name" enables level 0; levels are
// clamped to [-1, 1000]; entries with an empty name or a non-numeric level
// are ignored.
void UpdateLoggingLevelConfig(LoggingLevelConfig& config,
                              std::string_view input) {
  auto& levels = config.levels;
  for (std::string_view entry :
       absl::StrSplit(input, ',', absl::SkipEmpty())) {
    const size_t eq_pos = entry.rfind('=');
    if (eq_pos == std::string_view::npos) {
      // Bare flag name: enable at level 0.
      levels.insert_or_assign(std::string(entry), 0);
    } else if (eq_pos != 0) {
      int level = 0;
      if (absl::SimpleAtoi(entry.substr(eq_pos + 1), &level)) {
        // Clamp to the supported range [-1, 1000].
        if (level < -1) level = -1;
        if (level > 1000) level = 1000;
        levels.insert_or_assign(std::string(entry.substr(0, eq_pos)), level);
      }
    }
    // Entries like "=2" (empty name) fall through and are ignored.
  }
  config.default_level = -1;
  if (auto it = levels.find("all"); it != levels.end()) {
    config.default_level = it->second;
  }
}
// Returns the configured level for `name`, trying the full name first and
// then successively shorter dotted prefixes ("a.b.c" -> "a.b" -> "a").
// Falls back to the default level when nothing matches.
int GetLevelForVerboseFlag(const LoggingLevelConfig& config,
                           std::string_view name) {
  for (std::string_view prefix = name; !prefix.empty();) {
    if (auto it = config.levels.find(prefix); it != config.levels.end()) {
      return it->second;
    }
    const size_t dot = prefix.rfind('.');
    if (dot == std::string_view::npos) break;
    prefix = prefix.substr(0, dot);
  }
  return config.default_level;
}
// Returns the process-wide logging configuration, initializing it on first
// use from the TENSORSTORE_VERBOSE_LOGGING environment variable.  Caller
// must hold g_mutex.
LoggingLevelConfig& GetLoggingLevelConfig()
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(g_mutex) {
  // absl::NoDestructor: the config is intentionally never destroyed.
  static absl::NoDestructor<LoggingLevelConfig> flags{[] {
    LoggingLevelConfig config;
    if (auto env = internal::GetEnv("TENSORSTORE_VERBOSE_LOGGING"); env) {
      UpdateLoggingLevelConfig(config, *env);
    }
    return config;
  }()};
  return *flags;
}
}
// Applies `input` (comma-separated "name[=level]" entries) to the global
// logging configuration and refreshes the cached level of every registered
// VerboseFlag.  With `overwrite` false, new entries are merged on top of the
// existing configuration rather than replacing it.
void UpdateVerboseLogging(std::string_view input, bool overwrite)
    ABSL_LOCKS_EXCLUDED(g_mutex) {
  ABSL_LOG(INFO) << "--tensorstore_verbose_logging=" << input;
  // Parse outside the lock.
  LoggingLevelConfig config;
  UpdateLoggingLevelConfig(config, input);
  absl::MutexLock lock(&g_mutex);
  VerboseFlag* slist = g_list_head;
  LoggingLevelConfig& global_config = GetLoggingLevelConfig();
  // Install the new config; `config` now holds the previous one.
  std::swap(global_config.levels, config.levels);
  std::swap(global_config.default_level, config.default_level);
  if (!overwrite) {
    // Merge mode: merge() keeps entries already present in global_config
    // (the new input wins on conflicts).
    // NOTE(review): when the new input contains "all", this restores the
    // *previous* default level — confirm this merge behavior is intended.
    if (global_config.levels.count("all")) {
      global_config.default_level = config.default_level;
    }
    global_config.levels.merge(config.levels);
  }
  // Re-evaluate every registered flag against the updated configuration;
  // the "verbose_logging" meta-flag controls logging of these updates.
  int vlevel = GetLevelForVerboseFlag(global_config, "verbose_logging");
  while (slist != nullptr) {
    int value = GetLevelForVerboseFlag(global_config, slist->name_);
    ABSL_LOG_IF(INFO, vlevel >= 1) << slist->name_ << "=" << value;
    slist->value_.store(value, std::memory_order_seq_cst);
    slist = slist->next_;
  }
}
// First-use registration: computes the flag's initial level from the global
// configuration and links it into the global flag list so that later
// UpdateVerboseLogging() calls can refresh it.  Returns the current level.
int VerboseFlag::RegisterVerboseFlag(VerboseFlag* flag) {
  absl::MutexLock lock(&g_mutex);
  // Re-check under the lock: another thread may have registered this flag
  // between the caller's check and lock acquisition.
  int old_v = flag->value_.load(std::memory_order_relaxed);
  if (old_v == kValueUninitialized) {
    const auto& config = GetLoggingLevelConfig();
    old_v = GetLevelForVerboseFlag(config, flag->name_);
    flag->value_.store(old_v, std::memory_order_relaxed);
    // Push onto the head of the singly-linked registration list.
    flag->next_ = std::exchange(g_list_head, flag);
  }
  return old_v;
}
// Out-of-line slow path for the level check: registers the flag on first
// use, then compares the cached level against `level`.
bool VerboseFlag::VerboseFlagSlowPath(VerboseFlag* flag, int old_v, int level) {
  if (ABSL_PREDICT_TRUE(old_v != kValueUninitialized)) {
    return old_v >= level;
  }
  old_v = RegisterVerboseFlag(flag);
  return ABSL_PREDICT_FALSE(old_v >= level);
}
static_assert(std::is_trivially_destructible<VerboseFlag>::value,
"VerboseFlag must be trivially destructible");
}
} | #include "tensorstore/internal/log/verbose_flag.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/attributes.h"
using ::tensorstore::internal_log::UpdateVerboseLogging;
using ::tensorstore::internal_log::VerboseFlag;
#define TENSORSTORE_VERBOSE_FLAG(X) \
[]() -> ::tensorstore::internal_log::VerboseFlag& { \
ABSL_CONST_INIT static ::tensorstore::internal_log::VerboseFlag flag(X); \
return flag; \
}()
namespace {
// Exercises flag-level parsing and live updates: flags pick up levels from
// the most recent UpdateVerboseLogging() call, including hierarchical names
// ("a" vs "a.b") and merge semantics when overwrite == false.
TEST(VerboseFlag, Basic) {
  UpdateVerboseLogging("a=2", true);
  ABSL_CONST_INIT static VerboseFlag a("a");
  ABSL_CONST_INIT static VerboseFlag ab("a.b");
  auto& b = TENSORSTORE_VERBOSE_FLAG("b");
  // "a" (and its child "a.b") are enabled up to level 2; "b" is off.
  EXPECT_THAT((bool)a, true);
  EXPECT_THAT(a.Level(0), true);
  EXPECT_THAT(a.Level(1), true);
  EXPECT_THAT(a.Level(2), true);
  EXPECT_THAT(a.Level(3), false);
  EXPECT_THAT(ab.Level(3), false);
  EXPECT_THAT(ab.Level(2), true);
  EXPECT_THAT(ab.Level(1), true);
  EXPECT_THAT(ab.Level(0), true);
  EXPECT_THAT((bool)ab, true);
  EXPECT_THAT((bool)b, false);
  EXPECT_THAT(b.Level(0), false);
  // Merge (overwrite == false): disable "a", set "a.b" to 1, enable "b".
  UpdateVerboseLogging("b,a=-1,a.b=1", false);
  EXPECT_THAT((bool)a, false);
  EXPECT_THAT(a.Level(0), false);
  EXPECT_THAT(a.Level(1), false);
  EXPECT_THAT((bool)ab, true);
  EXPECT_THAT(ab.Level(0), true);
  EXPECT_THAT(ab.Level(1), true);
  EXPECT_THAT(ab.Level(2), false);
  EXPECT_THAT((bool)b, true);
  EXPECT_THAT(b.Level(0), true);
  EXPECT_THAT(b.Level(1), false);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/log/verbose_flag.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/log/verbose_flag_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4c8b14a4-a2fa-45f8-ab69-619597b9724e | cpp | google/tensorstore | admission_queue | tensorstore/internal/rate_limiter/admission_queue.cc | tensorstore/internal/rate_limiter/admission_queue_test.cc | #include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <cassert>
#include <limits>
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
namespace tensorstore {
namespace internal {
// Constructs a queue admitting at most `limit` concurrent operations; a
// limit of 0 means "unlimited" (max size_t).
AdmissionQueue::AdmissionQueue(size_t limit)
    : limit_(limit == 0 ? std::numeric_limits<size_t>::max() : limit) {}

// Admits `node`, invoking `fn` immediately when under the concurrency limit;
// otherwise parks the node on the intrusive wait list until Finish() frees a
// slot. `node` must not already be enqueued (asserted below).
void AdmissionQueue::Admit(RateLimiterNode* node, RateLimiterNode::StartFn fn) {
  assert(node->next_ == nullptr);
  assert(node->prev_ == nullptr);
  assert(node->start_fn_ == nullptr);
  node->start_fn_ = fn;
  {
    absl::MutexLock lock(&mutex_);
    if (in_flight_++ >= limit_) {
      // Over the limit: queue the node; Finish() will start it later.
      internal::intrusive_linked_list::InsertBefore(RateLimiterNodeAccessor{},
                                                    &head_, node);
      return;
    }
  }
  // Start outside the lock so user code never runs under mutex_.
  RunStartFunction(node);
}

// Marks one operation complete and, if another node is waiting, dequeues and
// starts it (outside the lock).
void AdmissionQueue::Finish(RateLimiterNode* node) {
  assert(node->next_ == nullptr);
  RateLimiterNode* next_node = nullptr;
  {
    absl::MutexLock lock(&mutex_);
    in_flight_--;
    next_node = head_.next_;
    if (next_node == &head_) return;  // Wait list is empty.
    internal::intrusive_linked_list::Remove(RateLimiterNodeAccessor{},
                                            next_node);
  }
  RunStartFunction(next_node);
}
}
} | #include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::AdmissionQueue;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiterNode;
// Test task node: runs `task_` when started and releases its admission-queue
// slot (via Finish) on destruction.
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
  AdmissionQueue* queue_;
  ExecutorTask task_;

  Node(AdmissionQueue* queue, ExecutorTask task)
      : queue_(queue), task_(std::move(task)) {}

  ~Node() { queue_->Finish(this); }

  // Start callback: adopts the reference added before Admit() and runs the
  // stored task; the node is destroyed when `self` goes out of scope.
  static void Start(void* task) {
    IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
    std::move(self->task_)();
  }
};

// With limit 1 the queue serializes 100 tasks; all of them must have run by
// the end because each node's destruction (Finish) starts the next one.
TEST(AdmissionQueueTest, Basic) {
  AdmissionQueue queue(1);
  std::atomic<size_t> done{0};

  EXPECT_EQ(1, queue.limit());
  EXPECT_EQ(0, queue.in_flight());

  {
    for (int i = 0; i < 100; i++) {
      auto node = MakeIntrusivePtr<Node>(&queue, [&done] { done++; });
      // Extra ref is adopted by Node::Start.
      intrusive_ptr_increment(node.get());
      queue.Admit(node.get(), &Node::Start);
    }
  }
  EXPECT_EQ(100, done);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/admission_queue.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/admission_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f3ea2835-ae58-4400-aa72-323aa7b250ca | cpp | google/tensorstore | scaling_rate_limiter | tensorstore/internal/rate_limiter/scaling_rate_limiter.cc | tensorstore/internal/rate_limiter/scaling_rate_limiter_test.cc | #include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <limits>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/time/time.h"
#include "tensorstore/internal/rate_limiter/token_bucket_rate_limiter.h"
namespace tensorstore {
namespace internal {
namespace {
// Returns ln(2) divided by the doubling time in seconds, i.e. the exponent
// `a` such that initial_rate * e^(a*t) doubles every `doubling_time`.
// Degenerate doubling times (non-positive or infinite) yield 0, disabling
// exponential growth.
double GetLogA(absl::Duration doubling_time) {
  const bool degenerate = doubling_time <= absl::ZeroDuration() ||
                          doubling_time == absl::InfiniteDuration();
  if (degenerate) return 0;
  constexpr double kLn2 = 0.69314718055994530941723212145817656;
  return kLn2 / absl::ToDoubleSeconds(doubling_time);
}
// Returns the token-bucket capacity for a limiter with the given initial
// rate: 1000x the rate, capped at 2000 tokens.
double GetMaxAvailable(double initial_rate) {
  constexpr double kCap = 2000.0;
  return std::min(initial_rate * 1000.0, kCap);
}
}
// Constructs a limiter whose rate starts at `initial_rate` and doubles every
// `doubling_time`; the bucket capacity is derived from the initial rate.
// CHECK-fails on a non-positive rate or a degenerate doubling time (which
// would make the growth exponent a_ zero).
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
                                         absl::Duration doubling_time)
    : TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
      initial_rate_(initial_rate),
      doubling_time_(doubling_time),
      a_(GetLogA(doubling_time)) {
  ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
  ABSL_CHECK_GT(a_, 0);
}

// As above, with an injectable clock (used by tests).
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
                                         absl::Duration doubling_time,
                                         std::function<absl::Time()> clock)
    : TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
      initial_rate_(initial_rate),
      doubling_time_(doubling_time),
      a_(GetLogA(doubling_time)) {
  ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
  ABSL_CHECK_GT(a_, 0);
}
// Integrates the exponentially growing rate initial_rate * e^(a_*t) over
// [previous, current] (t measured from start_time_) to get the number of
// tokens accrued in that interval.
double DoublingRateLimiter::TokensToAdd(absl::Time current,
                                        absl::Time previous) const {
  double int_current =
      std::exp(a_ * absl::ToDoubleSeconds(current - start_time_));
  double int_prev =
      std::exp(a_ * absl::ToDoubleSeconds(previous - start_time_));
  return initial_rate_ * (int_current - int_prev) / a_;
}

// Fixed 10ms refill cadence.
absl::Duration DoublingRateLimiter::GetSchedulerDelay() const {
  return absl::Milliseconds(10);
}
// Constructs a constant-rate limiter; r_ caches the period between tokens.
// CHECK-fails on a non-positive rate.
ConstantRateLimiter::ConstantRateLimiter(double initial_rate)
    : TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
      initial_rate_(initial_rate),
      r_(absl::Seconds(1.0 / initial_rate)) {
  ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}

// As above, with an injectable clock (used by tests).
ConstantRateLimiter::ConstantRateLimiter(double initial_rate,
                                         std::function<absl::Time()> clock)
    : TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
      initial_rate_(initial_rate),
      r_(absl::Seconds(1.0 / initial_rate)) {
  ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}

// Tokens accrue linearly: rate * elapsed seconds.
double ConstantRateLimiter::TokensToAdd(absl::Time current,
                                        absl::Time previous) const {
  return initial_rate_ * absl::ToDoubleSeconds(current - previous);
}

// Wake once per token period, but no more often than every 10ms.
absl::Duration ConstantRateLimiter::GetSchedulerDelay() const {
  return std::max(r_, absl::Milliseconds(10));
}
}
} | #include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::ConstantRateLimiter;
using ::tensorstore::internal::DoublingRateLimiter;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
// Test task node shared by the rate-limiter tests: runs `task_` when started
// and notifies the limiter (Finish) on destruction.
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
  RateLimiter* queue_;
  ExecutorTask task_;

  Node(RateLimiter* queue, ExecutorTask task)
      : queue_(queue), task_(std::move(task)) {}

  ~Node() { queue_->Finish(this); }

  // Adopts the reference added before Admit() and runs the stored task.
  static void Start(void* task) {
    IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
    std::move(self->task_)();
  }
};
// Verifies linear token accrual and that queued tasks are released only as
// the simulated clock advances.
TEST(ConstantRateLimiter, Basic) {
  absl::Time now = absl::Now();
  ConstantRateLimiter queue(0.2, [&now]() { return now; });

  EXPECT_EQ(0.2, queue.initial_rate());
  EXPECT_EQ(now, queue.start_time());
  EXPECT_EQ(now, queue.last_update());
  EXPECT_EQ(0, queue.available());

  // 0.2 tokens/sec: 10s -> 2 tokens, 300s -> 60 tokens.
  EXPECT_EQ(0, queue.TokensToAdd(now, now));
  EXPECT_EQ(2, queue.TokensToAdd(now + absl::Seconds(10), now));
  EXPECT_EQ(60, queue.TokensToAdd(now + absl::Seconds(300), now));

  std::atomic<size_t> done{0};
  {
    for (int i = 0; i < 100; i++) {
      auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
        done++;
      });
      intrusive_ptr_increment(node.get());  // Adopted by Node::Start.
      queue.Admit(node.get(), &Node::Start);
    }
  }
  // Tasks run only as the fake clock makes tokens available.
  now += absl::Seconds(10);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(2, done);
  now += absl::Seconds(100);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(22, done);
  now += absl::Seconds(400);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(100, done);
}

// Verifies exponential token accrual (rate doubles every 10s) and release of
// queued tasks under the simulated clock.
TEST(DoublingRateLimiter, Basic) {
  absl::Time now = absl::Now();
  DoublingRateLimiter queue(2, absl::Seconds(10), [&now]() { return now; });

  EXPECT_EQ(2, queue.initial_rate());
  EXPECT_EQ(absl::Seconds(10), queue.doubling_time());
  EXPECT_EQ(0, queue.available());

  EXPECT_EQ(0, queue.TokensToAdd(now, now));
  // Instantaneous rate has doubled after 10s and quadrupled after 20s.
  EXPECT_THAT(
      queue.TokensToAdd(now + absl::Seconds(11), now + absl::Seconds(10)),
      ::testing::Gt(4));
  EXPECT_THAT(
      queue.TokensToAdd(now + absl::Seconds(21), now + absl::Seconds(20)),
      ::testing::Gt(8));

  std::atomic<size_t> done{0};
  {
    for (int i = 0; i < 100; i++) {
      auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
        done++;
      });
      intrusive_ptr_increment(node.get());  // Adopted by Node::Start.
      queue.Admit(node.get(), &Node::Start);
    }
  }
  EXPECT_EQ(0, done);
  now += absl::Seconds(1);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(2, done);
  now += absl::Seconds(10);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(32, done);
  now += absl::Seconds(20);
  queue.PeriodicCallForTesting();
  EXPECT_EQ(100, done);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/scaling_rate_limiter.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/scaling_rate_limiter_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b9679af0-338e-4468-b3c2-67b768a45f76 | cpp | google/tensorstore | schedule_at | tensorstore/internal/thread/schedule_at.cc | tensorstore/internal/thread/schedule_at_test.cc | #include "tensorstore/internal/thread/schedule_at.h"
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <iterator>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/metrics/value.h"
#include "tensorstore/internal/tagged_ptr.h"
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/internal/tracing/tracing.h"
#include "tensorstore/util/stop_token.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal {
namespace {
using ScheduleAtTask = absl::AnyInvocable<void() &&>;
auto& schedule_at_queued_ops = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/internal/thread/schedule_at/queued_ops",
MetricMetadata("Operations in flight on the schedule_at thread"));
auto& schedule_at_next_event = internal_metrics::Value<absl::Time>::New(
"/tensorstore/internal/thread/schedule_at/next_event",
MetricMetadata("Time of the next in-flight schedule_at operation"));
auto& schedule_at_insert_histogram_ms =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/internal/thread/schedule_at/insert_histogram_ms",
MetricMetadata("Histogram of schedule_at insert delays (ms)",
internal_metrics::Units::kMilliseconds));
class DeadlineTaskQueue;
using TaggedQueuePointer = TaggedPtr<DeadlineTaskQueue, 1>;
struct DeadlineTaskNode;
using DeadlineTaskTree = intrusive_red_black_tree::Tree<DeadlineTaskNode>;
// Stop-token callback; cancels the associated deadline task when invoked.
struct DeadlineTaskStopCallback {
  DeadlineTaskNode& node;
  void operator()() const;
};

// A single scheduled task. Lives in either the deadline tree or the
// run-immediately list of DeadlineTaskQueue. `queue` is a tagged pointer:
// the pointer identifies the owning queue once enqueued, and the tag bit is
// set when cancellation has been requested.
struct DeadlineTaskNode : public DeadlineTaskTree::NodeBase {
  DeadlineTaskNode(absl::Time deadline, ScheduleAtTask&& task,
                   const StopToken& token)
      : deadline(deadline),
        task(std::move(task)),
        trace_context(internal_tracing::TraceContext::kThread),
        queue(TaggedQueuePointer{}),
        stop_callback(token, DeadlineTaskStopCallback{*this}) {}

  // Runs the task (unless cancelled) and deletes this node.
  void RunAndDelete();

  absl::Time deadline;
  ScheduleAtTask task;
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS internal_tracing::TraceContext trace_context;
  std::atomic<TaggedQueuePointer> queue;
  StopCallback<DeadlineTaskStopCallback> stop_callback;
};
using RunImmediatelyQueueAccessor =
intrusive_red_black_tree::LinkedListAccessor<DeadlineTaskNode>;
// Singleton queue backing ScheduleAt(): one detached worker thread sleeps
// until the earliest deadline (or a wakeup signal) and runs due tasks.
// Tasks whose deadline is already past go on a separate run-immediately
// linked list. The queue is intentionally never destroyed.
class DeadlineTaskQueue {
 public:
  explicit DeadlineTaskQueue()
      : run_immediately_queue_(nullptr),
        next_wakeup_(absl::InfinitePast()),
        woken_up_(absl::InfinitePast()),
        thread_({"TensorstoreScheduleAt"}, &DeadlineTaskQueue::Run, this) {}

  // Lives for the life of the process; reaching the destructor is a bug.
  ~DeadlineTaskQueue() { ABSL_UNREACHABLE(); }

  void ScheduleAt(absl::Time target_time, ScheduleAtTask task,
                  const StopToken& stop_token);

  // Worker-thread main loop (never returns).
  void Run();

 private:
  friend struct DeadlineTaskNode;
  friend struct DeadlineTaskStopCallback;

  // Removes a cancelled node from the tree unless it is already due.
  void TryRemove(DeadlineTaskNode& node);

  absl::Mutex mutex_;
  absl::CondVar cond_var_;
  // Pending tasks keyed by deadline.
  DeadlineTaskTree tree_ ABSL_GUARDED_BY(mutex_);
  // Tasks already due when scheduled.
  DeadlineTaskNode* run_immediately_queue_ ABSL_GUARDED_BY(mutex_);
  // Deadline the worker currently sleeps until.
  absl::Time next_wakeup_ ABSL_GUARDED_BY(mutex_);
  // Latest wall time the worker has observed (monotone non-decreasing).
  absl::Time woken_up_ ABSL_GUARDED_BY(mutex_);
  Thread thread_;
};
// Enqueues a task. If `target_time` is not later than the worker's last
// observed wake time, the node goes on the run-immediately list; otherwise
// it is inserted into the deadline tree, waking the worker if its next
// wakeup moved earlier.
void DeadlineTaskQueue::ScheduleAt(absl::Time target_time, ScheduleAtTask task,
                                   const StopToken& stop_token) {
  schedule_at_queued_ops.Increment();
  schedule_at_insert_histogram_ms.Observe(
      absl::ToInt64Milliseconds(target_time - absl::Now()));
  auto node = std::make_unique<DeadlineTaskNode>(target_time, std::move(task),
                                                 stop_token);

  absl::MutexLock l(&mutex_);
  // Publish ownership; a set tag bit means the stop callback already fired,
  // in which case the node is simply dropped (unique_ptr deletes it).
  auto tagged_queue_ptr = node->queue.exchange(TaggedQueuePointer(this));
  if (tagged_queue_ptr.tag()) {
    return;
  }
  if (target_time <= woken_up_) {
    // Already due: append to the run-immediately list. The list is singly
    // linked via next; the first node's prev caches the tail for O(1) append.
    RunImmediatelyQueueAccessor{}.SetNext(node.get(), nullptr);
    if (run_immediately_queue_) {
      RunImmediatelyQueueAccessor{}.SetNext(
          RunImmediatelyQueueAccessor{}.GetPrev(run_immediately_queue_),
          node.get());
      RunImmediatelyQueueAccessor{}.SetPrev(run_immediately_queue_, node.get());
    } else {
      run_immediately_queue_ = node.get();
      RunImmediatelyQueueAccessor{}.SetPrev(node.get(), node.get());
    }
    if (next_wakeup_ != absl::InfinitePast()) {
      next_wakeup_ = absl::InfinitePast();
      cond_var_.Signal();
    }
    node.release();  // Ownership transferred to the list.
    return;
  }

  // Future deadline: insert into the tree (equal deadlines order after).
  tree_.FindOrInsert(
      [&](DeadlineTaskNode& other) {
        return target_time < other.deadline ? absl::weak_ordering::less
                                            : absl::weak_ordering::greater;
      },
      [&] { return node.release(); });
  if (target_time < next_wakeup_) {
    next_wakeup_ = target_time;
    cond_var_.Signal();
  }
}
// Worker-thread main loop: wait for the next deadline (or a signal), split
// all due nodes out of the tree, then run the run-immediately list followed
// by the due nodes. Never returns.
void DeadlineTaskQueue::Run() {
  while (true) {
    DeadlineTaskTree runnable;
    DeadlineTaskNode* run_immediately = nullptr;
    {
      absl::MutexLock l(&mutex_);
      do {
        run_immediately = std::exchange(run_immediately_queue_, nullptr);
        if (!run_immediately) {
          next_wakeup_ =
              tree_.empty() ? absl::InfiniteFuture() : tree_.begin()->deadline;
          schedule_at_next_event.Set(next_wakeup_);
          cond_var_.WaitWithDeadline(&mutex_, next_wakeup_);
        }
        // Observed time only moves forward, even if the clock steps back.
        auto woken_up = woken_up_ = std::max(woken_up_, absl::Now());
        // Partition the tree into due (deadline <= woken_up) and future.
        auto split_result = tree_.FindSplit([&](DeadlineTaskNode& node) {
          return node.deadline <= woken_up ? absl::weak_ordering::greater
                                           : absl::weak_ordering::less;
        });
        runnable = std::move(split_result.trees[0]);
        tree_ = std::move(split_result.trees[1]);
      } while (runnable.empty() && !run_immediately);
      next_wakeup_ = absl::InfinitePast();
    }

    // Run tasks outside the lock; restore the thread trace context after.
    internal_tracing::TraceContext base =
        internal_tracing::TraceContext(internal_tracing::TraceContext::kThread);
    while (run_immediately) {
      auto* next = RunImmediatelyQueueAccessor{}.GetNext(run_immediately);
      run_immediately->RunAndDelete();
      run_immediately = next;
    }
    for (DeadlineTaskTree::iterator it = runnable.begin(), next;
         it != runnable.end(); it = next) {
      next = std::next(it);
      runnable.Remove(*it);
      it->RunAndDelete();
    }
    internal_tracing::SwapCurrentTraceContext(&base);
  }
}
// Runs the stored task unless the node was cancelled (tag bit set), then
// frees the node. Called from the worker thread only.
void DeadlineTaskNode::RunAndDelete() {
  schedule_at_queued_ops.Decrement();
  if (queue.load(std::memory_order_relaxed).tag()) {
    // Cancelled after becoming runnable; skip the task.
  } else {
    internal_tracing::SwapCurrentTraceContext(&trace_context);
    std::move(task)();
  }
  delete this;
}

// Stop token fired: mark the node cancelled and, if it was already enqueued,
// ask the queue to remove it. A null queue pointer means ScheduleAt has not
// published the node yet and will drop it itself.
void DeadlineTaskStopCallback::operator()() const {
  auto tagged_queue_ptr = node.queue.exchange(TaggedQueuePointer{nullptr, 1});
  auto* queue_ptr = tagged_queue_ptr.get();
  if (!queue_ptr) {
    return;
  }
  queue_ptr->TryRemove(node);
}

// Removes a cancelled node from the deadline tree and frees it. A node whose
// deadline is already due is left for the worker thread (RunAndDelete will
// see the tag and skip the task).
void DeadlineTaskQueue::TryRemove(DeadlineTaskNode& node) {
  {
    absl::MutexLock lock(&mutex_);
    if (node.deadline <= woken_up_) {
      return;
    }
    tree_.Remove(node);
  }
  delete &node;
  schedule_at_queued_ops.Decrement();
}
}
// Schedules `task` to run (on a shared detached worker thread) at or after
// `target_time`, unless `stop_token` is triggered first. The backing queue
// is created on first use and intentionally never destroyed.
void ScheduleAt(absl::Time target_time, ScheduleAtTask task,
                const StopToken& stop_token) {
  static absl::NoDestructor<DeadlineTaskQueue> g_queue;
  // absl::Time is trivially copyable, so the original std::move(target_time)
  // was a no-op (clang-tidy: performance-move-const-arg); pass it directly.
  g_queue->ScheduleAt(target_time, std::move(task), stop_token);
}
}
} | #include "tensorstore/internal/thread/schedule_at.h"
#include <memory>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/util/stop_token.h"
namespace {
using ::tensorstore::StopSource;
using ::tensorstore::internal::ScheduleAt;
// A task with a later deadline completes after one with an earlier deadline.
TEST(ScheduleAtTest, Basic) {
  absl::Notification a, b;
  auto now = absl::Now();
  ScheduleAt(now + absl::Milliseconds(1), [&] { a.Notify(); });
  ScheduleAt(now + absl::Milliseconds(5), [&] { b.Notify(); });
  EXPECT_FALSE(b.HasBeenNotified());
  b.WaitForNotification();
  EXPECT_TRUE(a.HasBeenNotified());
}

// A deadline in the past runs via the run-immediately path.
TEST(ScheduleAtTest, RunImmediately) {
  auto notification = std::make_shared<absl::Notification>();
  ScheduleAt(absl::InfinitePast(), [=] { notification->Notify(); });
  notification->WaitForNotification();
}

// Immediate tasks preserve FIFO order even while the worker is busy.
TEST(ScheduleAtTest, RunMultipleImmediately) {
  // Block the worker thread so the following tasks queue up.
  auto notification = std::make_shared<absl::Notification>();
  ScheduleAt(absl::Now(), [=] { notification->WaitForNotification(); });
  auto notification1 = std::make_shared<absl::Notification>();
  auto notification2 = std::make_shared<absl::Notification>();
  ScheduleAt(absl::InfinitePast(), [=] {
    EXPECT_FALSE(notification2->HasBeenNotified());
    notification1->Notify();
  });
  ScheduleAt(absl::InfinitePast(), [=] { notification2->Notify(); });
  notification->Notify();
  notification1->WaitForNotification();
  notification2->WaitForNotification();
}

// Cancelling a pending task releases its closure without running it
// (observed via the shared_ptr use count dropping back to 1).
TEST(ScheduleAtTest, Cancel) {
  auto notification = std::make_shared<absl::Notification>();
  EXPECT_EQ(1, notification.use_count());
  StopSource stop_source;
  ScheduleAt(
      absl::InfiniteFuture(), [notification] { notification->Notify(); },
      stop_source.get_token());
  EXPECT_EQ(2, notification.use_count());
  stop_source.request_stop();
  EXPECT_EQ(1, notification.use_count());
  EXPECT_FALSE(notification->HasBeenNotified());
}

// A task scheduled with an already-stopped token is dropped immediately.
TEST(ScheduleAtTest, CancelImmediately) {
  auto notification = std::make_shared<absl::Notification>();
  EXPECT_EQ(1, notification.use_count());
  StopSource stop_source;
  stop_source.request_stop();
  ScheduleAt(
      absl::InfinitePast(), [notification] { notification->Notify(); },
      stop_source.get_token());
  EXPECT_EQ(1, notification.use_count());
  EXPECT_FALSE(notification->HasBeenNotified());
}

// Cancellation requested while the worker is running earlier tasks still
// prevents the cancelled task from running.
TEST(ScheduleAtTest, CancelWhileRunning) {
  auto notification1 = std::make_shared<absl::Notification>();
  StopSource stop_source;
  ScheduleAt(absl::InfinitePast(), [=] {
    notification1->WaitForNotification();
    stop_source.request_stop();
  });
  auto notification2 = std::make_shared<absl::Notification>();
  auto notification3 = std::make_shared<absl::Notification>();
  ScheduleAt(
      absl::InfinitePast(), [=] { notification2->Notify(); },
      stop_source.get_token());
  ScheduleAt(absl::InfinitePast(), [=] { notification3->Notify(); });
  notification1->Notify();
  notification3->WaitForNotification();
  EXPECT_FALSE(notification2->HasBeenNotified());
  EXPECT_EQ(1, notification2.use_count());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/schedule_at.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/schedule_at_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
295b54a4-a0b7-4604-a9ef-4a8f545a5f55 | cpp | google/tensorstore | thread_pool | tensorstore/internal/thread/thread_pool.cc | tensorstore/internal/thread/thread_pool_test.cc | #include "tensorstore/internal/thread/thread_pool.h"
#include <stddef.h>
#include <cassert>
#include <limits>
#include <memory>
#include <thread>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/thread/pool_impl.h"
#include "tensorstore/internal/thread/task.h"
#include "tensorstore/internal/thread/task_group_impl.h"
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace internal {
namespace {
// Returns an executor backed by the process-wide SharedThreadPool, with its
// own TaskGroup limited to `num_threads`. A value of 0 or SIZE_MAX selects a
// default based on hardware concurrency (and logs once).
Executor DefaultThreadPool(size_t num_threads) {
  static absl::NoDestructor<internal_thread_impl::SharedThreadPool> pool_;
  intrusive_ptr_increment(pool_.get());  // Leaked ref: the pool lives forever.
  if (num_threads == 0 || num_threads == std::numeric_limits<size_t>::max()) {
    num_threads = std::thread::hardware_concurrency() * 16;
    if (num_threads == 0) num_threads = 1024;  // Concurrency unknown.
    ABSL_LOG_FIRST_N(INFO, 1)
        << "DetachedThreadPool should specify num_threads; using "
        << num_threads;
  }
  auto task_group = internal_thread_impl::TaskGroup::Make(
      internal::IntrusivePtr<internal_thread_impl::SharedThreadPool>(
          pool_.get()),
      num_threads);
  // The returned executor shares ownership of the task group; each submitted
  // task is wrapped in an InFlightTask and handed to the group.
  return [task_group = std::move(task_group)](ExecutorTask task) {
    task_group->AddTask(
        std::make_unique<internal_thread_impl::InFlightTask>(std::move(task)));
  };
}
}  // namespace

// Public entry point; see DefaultThreadPool above for semantics.
Executor DetachedThreadPool(size_t num_threads) {
  return DefaultThreadPool(num_threads);
}
}
} | #include "tensorstore/internal/thread/thread_pool.h"
#include <string>
#include "absl/flags/commandlineflag.h"
#include "absl/flags/reflection.h"
// No per-environment setup is needed for the default thread-pool test
// configuration; the shared test body is pulled in below via #include.
void SetupThreadPoolTestEnv() {
}
#include "tensorstore/internal/thread/thread_pool_test.inc" | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread_pool.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread_pool_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
de90b948-f9da-4f2f-a393-343a2fe93719 | cpp | google/tensorstore | pool_impl | tensorstore/internal/thread/pool_impl.cc | tensorstore/internal/thread/pool_impl_test.cc | #include "tensorstore/internal/thread/pool_impl.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/thread/task_provider.h"
#include "tensorstore/internal/thread/thread.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_thread_impl {
namespace {
constexpr absl::Duration kThreadStartDelay = absl::Milliseconds(5);
constexpr absl::Duration kThreadExitDelay = absl::Milliseconds(5);
constexpr absl::Duration kThreadIdleBeforeExit = absl::Seconds(20);
constexpr absl::Duration kOverseerIdleBeforeExit = absl::Seconds(20);
auto& thread_pool_started = internal_metrics::Counter<int64_t>::New(
"/tensorstore/thread_pool/started",
MetricMetadata("Threads started by SharedThreadPool"));
auto& thread_pool_active = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/thread_pool/active",
MetricMetadata("Active threads managed by SharedThreadPool"));
auto& thread_pool_task_providers = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/thread_pool/task_providers",
MetricMetadata("TaskProviders requesting threads from SharedThreadPool"));
ABSL_CONST_INIT internal_log::VerboseFlag thread_pool_logging("thread_pool");
}
// NOTE(review): `waiting_`'s type is declared in the header; 128 is its
// constructor argument (presumably an initial capacity — confirm there).
SharedThreadPool::SharedThreadPool() : waiting_(128) {
  ABSL_LOG_IF(INFO, thread_pool_logging) << "SharedThreadPool: " << this;
}

// Queues `task_provider` for thread assignment (deduplicated via in_queue_)
// and wakes — or lazily starts — the overseer thread.
void SharedThreadPool::NotifyWorkAvailable(
    internal::IntrusivePtr<TaskProvider> task_provider) {
  absl::MutexLock lock(&mutex_);
  if (in_queue_.insert(task_provider.get()).second) {
    waiting_.push_back(std::move(task_provider));
  }
  if (!overseer_running_) {
    StartOverseer();
  } else {
    overseer_condvar_.Signal();
  }
}
// Pops providers from the waiting queue until one reports outstanding work,
// returning it (or nullptr if none does). A provider expected to need more
// than one thread is re-queued so additional workers can pick it up; one
// needing exactly one thread (or none) is removed from in_queue_. Callers
// (overseer and idle workers) hold mutex_.
internal::IntrusivePtr<TaskProvider>
SharedThreadPool::FindActiveTaskProvider() {
  // size_t matches waiting_.size() and avoids the narrowing signed/unsigned
  // conversion the original `int i = waiting_.size()` performed.
  for (size_t i = waiting_.size(); i > 0; i--) {
    internal::IntrusivePtr<TaskProvider> ptr = std::move(waiting_.front());
    waiting_.pop_front();
    auto work = ptr->EstimateThreadsRequired();
    if (work == 0) {
      // No longer has work; drop it entirely.
      in_queue_.erase(ptr.get());
      continue;
    }
    if (work == 1) {
      in_queue_.erase(ptr.get());
    } else {
      waiting_.push_back(ptr);
    }
    thread_pool_task_providers.Set(waiting_.size());
    return ptr;
  }
  return nullptr;
}
// Overseer thread state: periodically decides whether a new worker thread
// should be started for queued task providers, rate-limited by
// kThreadStartDelay; exits after kOverseerIdleBeforeExit of inactivity.
struct SharedThreadPool::Overseer {
  internal::IntrusivePtr<SharedThreadPool> pool_;
  mutable absl::Time idle_start_time_;

  void operator()() const;
  void OverseerBody();
  absl::Time MaybeStartWorker(absl::Time now)
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(pool_->mutex_);
};

// Launches the detached overseer thread; caller holds mutex_ and has
// verified that no overseer is running.
void SharedThreadPool::StartOverseer() {
  assert(!overseer_running_);
  overseer_running_ = true;
  tensorstore::internal::Thread::StartDetached(
      {"ts_pool_overseer"},
      SharedThreadPool::Overseer{
          internal::IntrusivePtr<SharedThreadPool>(this)});
}

// Thread entry point; const_cast because the thread invokes a const callable.
void SharedThreadPool::Overseer::operator()() const {
  const_cast<SharedThreadPool::Overseer*>(this)->OverseerBody();
}

// Main overseer loop: wake on signal or deadline, possibly start a worker,
// and exit once the returned deadline has already passed.
void SharedThreadPool::Overseer::OverseerBody() {
  ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Overseer: " << this;
  absl::Time now = absl::Now();
  idle_start_time_ = now;
  absl::Time deadline = absl::InfinitePast();
  absl::MutexLock lock(&pool_->mutex_);
  while (true) {
    pool_->overseer_condvar_.WaitWithDeadline(&pool_->mutex_, deadline);
    now = absl::Now();
    deadline = MaybeStartWorker(now);
    if (deadline < now) break;
  }
  ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "~Overseer: " << this;
  pool_->overseer_running_ = false;
}

// Starts at most one worker per call, honoring the start-rate limits, and
// returns the next time the overseer should reconsider.
absl::Time SharedThreadPool::Overseer::MaybeStartWorker(absl::Time now) {
  if (pool_->idle_threads_ || pool_->waiting_.empty()) {
    // Idle workers exist (or nothing is waiting); no new thread needed.
    return idle_start_time_ + kOverseerIdleBeforeExit;
  }
  // Throttle thread creation: wait out the start delay after the last thread
  // start and the last queue assignment.
  if (now < pool_->last_thread_start_time_ + kThreadStartDelay) {
    return pool_->last_thread_start_time_ + kThreadStartDelay;
  }
  if (now < pool_->queue_assignment_time_ + kThreadStartDelay) {
    return pool_->queue_assignment_time_ + kThreadStartDelay;
  }
  auto task_provider = pool_->FindActiveTaskProvider();
  if (!task_provider) {
    return idle_start_time_ + kOverseerIdleBeforeExit;
  }
  pool_->StartWorker(std::move(task_provider), now);
  idle_start_time_ = now;
  return now + kThreadStartDelay;
}
// Worker thread state: the pool plus the task provider currently assigned.
struct SharedThreadPool::Worker {
  internal::IntrusivePtr<SharedThreadPool> pool_;
  internal::IntrusivePtr<TaskProvider> task_provider_;

  void operator()() const;
  void WorkerBody();
};

// Launches a detached worker thread for `task_provider`; caller holds mutex_
// (updates last_thread_start_time_ / worker_threads_).
void SharedThreadPool::StartWorker(
    internal::IntrusivePtr<TaskProvider> task_provider, absl::Time now) {
  last_thread_start_time_ = now;
  worker_threads_++;
  thread_pool_started.Increment();
  tensorstore::internal::Thread::StartDetached(
      {"ts_pool_worker"}, Worker{internal::IntrusivePtr<SharedThreadPool>(this),
                                 std::move(task_provider)});
}

// Thread entry point; const_cast because the thread invokes a const callable.
void SharedThreadPool::Worker::operator()() const {
  const_cast<SharedThreadPool::Worker*>(this)->WorkerBody();
}
// Worker main loop: run the assigned provider to completion, then idle
// waiting for more work; exit after kThreadIdleBeforeExit without an
// assignment, staggered by kThreadExitDelay between thread exits.
void SharedThreadPool::Worker::WorkerBody() {
  // RAII counter used for pool_->idle_threads_.
  struct ScopedIncDec {
    size_t& x_;
    ScopedIncDec(size_t& x) : x_(x) { x_++; }
    ~ScopedIncDec() { x_--; }
  };

  thread_pool_active.Increment();
  ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Worker: " << this;
  while (true) {
    if (task_provider_) {
      task_provider_->DoWorkOnThread();
      task_provider_ = nullptr;
    }
    ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Idle: " << this;
    absl::Time now = absl::Now();
    absl::Time deadline = now + kThreadIdleBeforeExit;
    {
      absl::MutexLock lock(&pool_->mutex_);
      ScopedIncDec idle(pool_->idle_threads_);
      while (!task_provider_) {
        // Sleep until some provider is queued or the idle deadline passes.
        bool active = pool_->mutex_.AwaitWithDeadline(
            absl::Condition(
                +[](SharedThreadPool* self) ABSL_EXCLUSIVE_LOCKS_REQUIRED(
                     self->mutex_) { return !self->waiting_.empty(); },
                pool_.get()),
            deadline);
        now = absl::Now();
        if (active) {
          task_provider_ = pool_->FindActiveTaskProvider();
        } else {
          // Idle timeout; stagger exits so threads don't all die at once.
          deadline = std::max(deadline,
                              pool_->last_thread_exit_time_ + kThreadExitDelay);
          if (deadline < now) {
            break;
          }
        }
      }
      if (task_provider_) {
        pool_->queue_assignment_time_ = now;
      } else {
        // Giving up: deregister this worker and exit the outer loop.
        pool_->worker_threads_--;
        pool_->last_thread_exit_time_ = now;
        break;
      }
    }
  }
  thread_pool_active.Decrement();
  ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "~Worker: " << this;
}
}
} | #include "tensorstore/internal/thread/pool_impl.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/thread/task.h"
#include "tensorstore/internal/thread/task_provider.h"
namespace {
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal_thread_impl::InFlightTask;
using ::tensorstore::internal_thread_impl::SharedThreadPool;
using ::tensorstore::internal_thread_impl::TaskProvider;
// Minimal TaskProvider holding at most one task; used to drive the pool.
struct SingleTaskProvider : public TaskProvider {
  struct private_t {};

 public:
  static IntrusivePtr<SingleTaskProvider> Make(
      IntrusivePtr<SharedThreadPool> pool, std::unique_ptr<InFlightTask> task) {
    return MakeIntrusivePtr<SingleTaskProvider>(private_t{}, std::move(pool),
                                                std::move(task));
  }

  SingleTaskProvider(private_t, IntrusivePtr<SharedThreadPool> pool,
                     std::unique_ptr<InFlightTask> task)
      : pool_(std::move(pool)), task_(std::move(task)) {}

  ~SingleTaskProvider() override = default;

  // Reports 1 while the task is still pending; also bumps flags_ so a test
  // could observe that the pool polled this provider.
  int64_t EstimateThreadsRequired() override {
    absl::MutexLock lock(&mutex_);
    flags_ += 2;
    return task_ ? 1 : 0;
  }

  // Hands this provider to the pool for scheduling.
  void Trigger() {
    pool_->NotifyWorkAvailable(IntrusivePtr<TaskProvider>(this));
  }

  // Runs the pending task (if any) on a pool thread.
  void DoWorkOnThread() override {
    std::unique_ptr<InFlightTask> task;
    {
      absl::MutexLock lock(&mutex_);
      flags_ |= 1;
      if (task_) {
        task = std::move(task_);
      }
    }
    if (task) {
      task->Run();
    }
  }

  IntrusivePtr<SharedThreadPool> pool_;
  absl::Mutex mutex_;
  std::unique_ptr<InFlightTask> task_ ABSL_GUARDED_BY(mutex_);
  int64_t flags_ = 0;  // Bit 0: work ran; increments of 2 count polls.
};

// A single task submitted (and redundantly re-triggered) runs exactly once.
TEST(SharedThreadPoolTest, Basic) {
  auto pool = MakeIntrusivePtr<SharedThreadPool>();
  {
    absl::Notification notification;
    auto provider = SingleTaskProvider::Make(
        pool, std::make_unique<InFlightTask>([&] { notification.Notify(); }));
    provider->Trigger();
    provider->Trigger();
    notification.WaitForNotification();
  }
}

// Stress: exponentially growing batches of single-task providers all finish.
TEST(SharedThreadPoolTest, LotsOfProviders) {
  auto pool = MakeIntrusivePtr<SharedThreadPool>();
  std::vector<IntrusivePtr<SingleTaskProvider>> providers;
  providers.reserve(1000);
  for (int i = 2; i < 1000; i = i * 2) {
    absl::BlockingCounter a(i);
    for (int j = 0; j < i; j++) {
      providers.push_back(SingleTaskProvider::Make(
          pool, std::make_unique<InFlightTask>([&] { a.DecrementCount(); })));
    }
    for (auto& p : providers) p->Trigger();
    a.Wait();
    // Re-trigger with no pending work; providers report 0 and are dropped.
    for (auto& p : providers) p->Trigger();
    providers.clear();
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/pool_impl.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/pool_impl_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4006e144-3709-4d64-8719-d31365871b0e | cpp | google/tensorstore | thread | tensorstore/internal/thread/thread.cc | tensorstore/internal/thread/thread_test.cc | #if defined(__linux__) || defined(__APPLE__)
#include <pthread.h>
#endif
#include <thread>
#include <type_traits>
namespace tensorstore {
namespace internal {
void TrySetCurrentThreadName(const char* name) {
if (name == nullptr) return;
#if defined(__linux__)
pthread_setname_np(pthread_self(), name);
#endif
#if defined(__APPLE__)
pthread_setname_np(name);
#endif
}
}
} | #include "tensorstore/internal/thread/thread.h"
#include <gtest/gtest.h>
namespace {
TEST(ThreadTest, Basic) {
tensorstore::internal::Thread my_thread;
int x = 0;
tensorstore::internal::Thread::Id id[2];
my_thread = tensorstore::internal::Thread({}, [&x, &id]() {
x = 1;
id[1] = tensorstore::internal::Thread::this_thread_id();
});
id[0] = my_thread.get_id();
my_thread.Join();
EXPECT_EQ(id[0], id[1]);
EXPECT_EQ(1, x);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4e94c2d6-5c4b-4866-8f55-4f6af0728204 | cpp | google/tensorstore | storage | tensorstore/internal/poly/storage.cc | tensorstore/internal/poly/storage_test.cc | #include "tensorstore/internal/poly/storage.h"
namespace tensorstore {
namespace internal_poly_storage {
constexpr VTableBase NullVTable::vtable;
}
} | #include "tensorstore/internal/poly/storage.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_poly_storage::ActualInlineSize;
using ::tensorstore::internal_poly_storage::GetVTableBase;
using ::tensorstore::internal_poly_storage::HeapStorageOps;
using ::tensorstore::internal_poly_storage::InlineStorageOps;
using ::tensorstore::internal_poly_storage::Storage;
using ::tensorstore::internal_poly_storage::VTableBase;
static constexpr size_t kStorageSize = ActualInlineSize(8);
static_assert(80 == ActualInlineSize(79));
static_assert(80 == ActualInlineSize(80));
TEST(ObjectOps, InlineTrivial) {
using S = Storage<kStorageSize, true>;
using Ops = typename S::Ops<int>;
static_assert(std::is_same_v<Ops, InlineStorageOps<int>>);
static_assert(Ops::UsesInlineStorage());
S a, b;
EXPECT_EQ(nullptr, a.template get_if<int>());
Ops::Construct(a.storage(), 7);
Ops::Relocate(b.storage(), a.storage());
Ops::Copy(a.storage(), b.storage());
EXPECT_EQ(7, Ops::Get(a.storage()));
EXPECT_EQ(7, Ops::Get(b.storage()));
Ops::Destroy(a.storage());
Ops::Destroy(b.storage());
}
TEST(ObjectOps, NotInlineTrivial) {
struct X {
double x;
double y;
double z;
};
using S = Storage<kStorageSize, true>;
using Ops = typename S::Ops<X>;
static_assert(std::is_same_v<Ops, HeapStorageOps<X>>);
static_assert(!Ops::UsesInlineStorage());
S a, b;
EXPECT_EQ(nullptr, a.get_if<int>());
Ops::Construct(a.storage(), X{7, 8, 9});
Ops::Relocate(b.storage(), a.storage());
Ops::Copy(a.storage(), b.storage());
EXPECT_EQ(7, Ops::Get(a.storage()).x);
EXPECT_EQ(9, Ops::Get(b.storage()).z);
Ops::Destroy(a.storage());
Ops::Destroy(b.storage());
}
template <typename Ops, bool Copyable>
static const VTableBase* GetVTable() {
static VTableBase vtable = GetVTableBase<Ops, Copyable>();
return &vtable;
}
TEST(Storage, MoveOnly) {
using S = Storage<16, false>;
using Ops = typename S::Ops<int>;
{
S a;
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, false>(), 7);
ASSERT_FALSE(a.null());
ASSERT_NE(nullptr, a.get_if<int>());
EXPECT_EQ(7, *a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, false>(), 8);
S b = std::move(a);
ASSERT_FALSE(b.null());
ASSERT_NE(nullptr, b.get_if<int>());
EXPECT_EQ(8, *b.get_if<int>());
S c(std::move(b));
ASSERT_FALSE(c.null());
ASSERT_NE(nullptr, c.get_if<int>());
EXPECT_EQ(8, *c.get_if<int>());
}
}
TEST(Storage, Copy) {
using S = Storage<16, true>;
using Ops = typename S::Ops<int>;
{
S a;
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, true>(), 7);
ASSERT_FALSE(a.null());
ASSERT_NE(nullptr, a.get_if<int>());
EXPECT_EQ(7, *a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, true>(), 8);
S b = a;
ASSERT_NE(nullptr, b.get_if<int>());
EXPECT_EQ(8, *b.get_if<int>());
S c(a);
EXPECT_FALSE(a.null());
ASSERT_FALSE(c.null());
ASSERT_NE(nullptr, c.get_if<int>());
EXPECT_EQ(8, *c.get_if<int>());
a.Destroy();
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/storage.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/storage_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
da7108be-91df-4133-aa86-1e9506bd2dda | cpp | google/tensorstore | key_range | tensorstore/kvstore/key_range.cc | tensorstore/kvstore/key_range_test.cc | #include "tensorstore/kvstore/key_range.h"
#include <algorithm>
#include <cstddef>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/match.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
std::string_view PartialPrefix(std::string_view prefix) {
while (!prefix.empty() && prefix.back() == '\xff') {
prefix.remove_suffix(1);
}
return prefix;
}
std::string_view MinExclusiveMax(std::string_view a, std::string_view b) {
return KeyRange::CompareExclusiveMax(a, b) < 0 ? a : b;
}
}
KeyRange KeyRange::Prefix(std::string prefix) {
KeyRange range;
range.exclusive_max = PrefixExclusiveMax(prefix);
range.inclusive_min = std::move(prefix);
return range;
}
std::string KeyRange::Successor(std::string_view key) {
std::string successor;
successor.reserve(key.size() + 1);
successor.append(key);
successor += '\x00';
return successor;
}
KeyRange KeyRange::Singleton(std::string key) {
auto exclusive_max = Successor(key);
return KeyRange(std::move(key), std::move(exclusive_max));
}
bool KeyRange::is_singleton() const {
return exclusive_max.size() == (inclusive_min.size() + 1) &&
exclusive_max.back() == '\x00' &&
std::string_view(exclusive_max).substr(0, inclusive_min.size()) ==
inclusive_min;
}
bool KeyRange::is_non_empty_prefix() const {
std::string_view prefix = PartialPrefix(inclusive_min);
return !full() && exclusive_max.size() == prefix.size() &&
(prefix.empty() ||
(exclusive_max.back() == (prefix.back() + 1) &&
std::string_view(exclusive_max).substr(0, prefix.size() - 1) ==
prefix.substr(0, prefix.size() - 1)));
}
std::string KeyRange::PrefixExclusiveMax(std::string_view prefix) {
std::string prefix_copy(PartialPrefix(prefix));
if (!prefix_copy.empty()) {
auto& last_byte = prefix_copy.back();
last_byte = static_cast<unsigned char>(last_byte) + 1;
}
return prefix_copy;
}
absl::weak_ordering KeyRange::CompareKeyAndExclusiveMax(
std::string_view key, std::string_view bound) {
return bound.empty()
? absl::weak_ordering::less
: internal::CompareResultAsWeakOrdering(key.compare(bound));
}
absl::weak_ordering KeyRange::CompareExclusiveMax(std::string_view a,
std::string_view b) {
return a.empty() != b.empty()
? (a.empty() ? absl::weak_ordering::greater
: absl::weak_ordering::less)
: internal::CompareResultAsWeakOrdering(a.compare(b));
}
bool Contains(const KeyRange& haystack, std::string_view needle) {
return haystack.inclusive_min <= needle &&
KeyRange::CompareKeyAndExclusiveMax(needle, haystack.exclusive_max) <
0;
}
KeyRange Intersect(const KeyRange& a, const KeyRange& b) {
const auto* a_ptr = &a;
const auto* b_ptr = &b;
if (a_ptr->inclusive_min > b_ptr->inclusive_min) {
std::swap(a_ptr, b_ptr);
}
KeyRange result;
result.inclusive_min = b_ptr->inclusive_min;
result.exclusive_max =
std::string(MinExclusiveMax(a_ptr->exclusive_max, b_ptr->exclusive_max));
if (result.empty()) {
result.exclusive_max = result.inclusive_min;
}
return result;
}
bool Intersects(const KeyRange& a, const KeyRange& b) {
return !Intersect(a, b).empty();
}
bool Contains(const KeyRange& haystack, const KeyRange& needle) {
return haystack.inclusive_min <= needle.inclusive_min &&
KeyRange::CompareExclusiveMax(needle.exclusive_max,
haystack.exclusive_max) <= 0;
}
std::string_view LongestPrefix(const KeyRange& range) {
std::string_view inclusive_min = range.inclusive_min;
std::string_view exclusive_max = range.exclusive_max;
size_t i = 0;
if (exclusive_max.empty()) {
while (i < inclusive_min.size() && inclusive_min[i] == '\xff') {
++i;
}
} else {
size_t min_length = std::min(inclusive_min.size(), exclusive_max.size());
while (i < min_length && inclusive_min[i] == exclusive_max[i]) {
++i;
}
if (i + 1 == min_length && inclusive_min[i] != '\xff' &&
static_cast<unsigned char>(inclusive_min[i]) + 1 ==
static_cast<unsigned char>(exclusive_max[i])) {
++i;
while (i < inclusive_min.size() && inclusive_min[i] == '\xff') {
++i;
}
}
}
return inclusive_min.substr(0, i);
}
bool ContainsPrefix(const KeyRange& haystack, std::string_view prefix) {
return tensorstore::Contains(haystack, KeyRange::Prefix(std::string(prefix)));
}
bool IntersectsPrefix(const KeyRange& a, std::string_view prefix) {
return tensorstore::Intersects(a, KeyRange::Prefix(std::string(prefix)));
}
std::ostream& operator<<(std::ostream& os, const KeyRange& range) {
return os << "[" << tensorstore::QuoteString(range.inclusive_min) << ", "
<< tensorstore::QuoteString(range.exclusive_max) << ")";
}
KeyRange KeyRange::AddPrefix(std::string_view prefix, KeyRange range) {
if (prefix.empty()) return range;
range.inclusive_min.insert(0, prefix);
if (range.exclusive_max.empty()) {
range.exclusive_max = KeyRange::PrefixExclusiveMax(std::string(prefix));
} else {
range.exclusive_max.insert(0, prefix);
}
return range;
}
KeyRange KeyRange::RemovePrefix(std::string_view prefix, KeyRange range) {
if (prefix.empty()) return range;
if (prefix >= range.inclusive_min) {
range.inclusive_min.clear();
} else {
if (!absl::StartsWith(range.inclusive_min, prefix)) return EmptyRange();
range.inclusive_min.erase(0, prefix.size());
}
const auto c = CompareKeyAndExclusiveMax(prefix, range.exclusive_max);
if (c < 0) {
if (absl::StartsWith(range.exclusive_max, prefix)) {
range.exclusive_max.erase(0, prefix.size());
} else {
range.exclusive_max.clear();
}
} else {
return EmptyRange();
}
return range;
}
KeyRange KeyRange::RemovePrefixLength(size_t n, const KeyRange& range) {
std::string_view inclusive_min(range.inclusive_min);
if (n < inclusive_min.size()) {
inclusive_min.remove_prefix(n);
} else {
inclusive_min = {};
}
std::string_view exclusive_max(range.exclusive_max);
if (n < exclusive_max.size()) {
exclusive_max.remove_prefix(n);
} else {
exclusive_max = {};
}
return KeyRange(std::string(inclusive_min), std::string(exclusive_max));
}
} | #include "tensorstore/kvstore/key_range.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/compare.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::KeyRange;
TEST(KeyRangeTest, Comparison) {
KeyRange r1("a", "b");
EXPECT_EQ("a", r1.inclusive_min);
EXPECT_EQ("b", r1.exclusive_max);
KeyRange r2("a", "c");
KeyRange r3("", "b");
KeyRange r4("", "c");
EXPECT_EQ(r1, r1);
EXPECT_EQ(r2, r2);
EXPECT_EQ(r3, r3);
EXPECT_EQ(r4, r4);
EXPECT_NE(r1, r2);
EXPECT_NE(r1, r3);
EXPECT_NE(r1, r4);
EXPECT_NE(r2, r3);
EXPECT_NE(r2, r4);
EXPECT_NE(r3, r4);
}
TEST(KeyRangeTest, Full) {
KeyRange full;
EXPECT_TRUE(full.full());
EXPECT_EQ(std::string(), full.inclusive_min);
EXPECT_EQ(std::string(), full.exclusive_max);
EXPECT_EQ(full, KeyRange({}, {}));
EXPECT_NE(full, KeyRange("a", "b"));
EXPECT_NE(full, KeyRange("", "b"));
EXPECT_NE(full, KeyRange("a", ""));
EXPECT_FALSE(full.empty());
EXPECT_EQ("", tensorstore::LongestPrefix(full));
EXPECT_TRUE(tensorstore::Contains(full, "abc"));
EXPECT_EQ(KeyRange::Prefix(""), full);
}
TEST(KeyRangeTest, Empty) {
EXPECT_FALSE(KeyRange("a", "b").empty());
EXPECT_FALSE(KeyRange("a", "").empty());
EXPECT_TRUE(KeyRange("b", "a").empty());
EXPECT_TRUE(KeyRange("b", "b").empty());
}
TEST(KeyRangeTest, Prefix) {
EXPECT_EQ(KeyRange(), KeyRange::Prefix(""));
EXPECT_EQ(KeyRange("abc", "abd"), KeyRange::Prefix("abc"));
EXPECT_EQ(KeyRange("ab\xff", "ac"), KeyRange::Prefix("ab\xff"));
EXPECT_EQ(KeyRange("ab\xff\xff\xff", "ac"),
KeyRange::Prefix("ab\xff\xff\xff"));
EXPECT_EQ(KeyRange("\xff", ""), KeyRange::Prefix("\xff"));
EXPECT_EQ(KeyRange("\xff\xff\xff", ""), KeyRange::Prefix("\xff\xff\xff"));
EXPECT_FALSE(KeyRange::Prefix("").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("abc").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("ab\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("ab\xff\xff\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("\xff\xff\xff").is_non_empty_prefix());
EXPECT_FALSE(KeyRange::Prefix("ab\xff").full());
EXPECT_FALSE(KeyRange::Prefix("ab\xff").is_singleton());
}
TEST(KeyRangeTest, Successor) {
EXPECT_EQ(std::string({'a', 'b', 'c', '\x00'}), KeyRange::Successor("abc"));
EXPECT_EQ(std::string({'\x00'}), KeyRange::Successor(""));
}
TEST(KeyRangeTest, ContainsKey) {
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "a"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "ab"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "abc"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "b"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "ba"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "c"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "ca"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "d"));
}
TEST(KeyRangeTest, ContainsRange) {
EXPECT_TRUE(tensorstore::Contains(KeyRange(), KeyRange("ab", "cd")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("a", "c")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("ab", "c")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("ab", "ba")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("b", "ba")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("a")));
EXPECT_TRUE(
tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("ab")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("b")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("a", "ca")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("0", "a")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange()));
}
TEST(KeyRangeTest, Intersect) {
EXPECT_EQ(KeyRange("b", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("b", "c")));
EXPECT_EQ(KeyRange("c", "c"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("c", "d")));
EXPECT_EQ(KeyRange("b", "b"),
tensorstore::Intersect(KeyRange("", "b"), KeyRange("b", "")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange(), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange()));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("a", "c")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "c"), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("b", "c"),
tensorstore::Intersect(KeyRange("a", "c"), KeyRange("b", "c")));
EXPECT_EQ(KeyRange("aa", "b"),
tensorstore::Intersect(KeyRange("aa", "c"), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("aa", "b"),
tensorstore::Intersect(KeyRange("aa", ""), KeyRange("a", "b")));
}
TEST(KeyRangeTest, LongestPrefix) {
EXPECT_EQ("", tensorstore::LongestPrefix(KeyRange("a", "c")));
EXPECT_EQ("a", tensorstore::LongestPrefix(KeyRange("a", "b")));
EXPECT_EQ("a", tensorstore::LongestPrefix(KeyRange("aa", "b")));
EXPECT_EQ("abc", tensorstore::LongestPrefix(KeyRange("abc", "abcd")));
EXPECT_EQ("abc", tensorstore::LongestPrefix(KeyRange("abc", "abd")));
EXPECT_EQ("ab", tensorstore::LongestPrefix(KeyRange("abc", "abe")));
EXPECT_EQ("ab\xff", tensorstore::LongestPrefix(KeyRange("ab\xff", "ac")));
EXPECT_EQ("ab\xff\xff",
tensorstore::LongestPrefix(KeyRange("ab\xff\xff", "ac")));
EXPECT_EQ("\xff", tensorstore::LongestPrefix(KeyRange("\xff", "")));
EXPECT_EQ("\xff\xff", tensorstore::LongestPrefix(KeyRange("\xff\xff", "")));
}
TEST(KeyRangeTest, Ostream) {
EXPECT_EQ("[\"a\", \"b\")", tensorstore::StrCat(KeyRange("a", "b")));
EXPECT_EQ("[\"a\", \"ba\")", tensorstore::StrCat(KeyRange("a", "ba")));
}
TEST(KeyRangeTest, CompareKeyAndExclusiveMax) {
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("", ""),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("", "a"),
::testing::Eq(absl::weak_ordering::greater));
}
TEST(KeyRangeTest, CompareExclusiveMax) {
EXPECT_THAT(KeyRange::CompareExclusiveMax("", ""),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMax("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMax("", "a"),
::testing::Eq(absl::weak_ordering::greater));
}
TEST(KeyRangeTest, AddPrefix) {
EXPECT_THAT(KeyRange::AddPrefix("", KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::AddPrefix("x", KeyRange("a", "b")),
::testing::Eq(KeyRange("xa", "xb")));
EXPECT_THAT(KeyRange::AddPrefix("x", KeyRange("a", "")),
::testing::Eq(KeyRange("xa", "y")));
}
TEST(KeyRangeTest, EmptyRange) {
auto range = KeyRange::EmptyRange();
EXPECT_TRUE(range.empty());
EXPECT_EQ(range.inclusive_min, range.exclusive_max);
}
TEST(KeyRangeTest, RemovePrefix) {
EXPECT_THAT(KeyRange::RemovePrefix("", KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::RemovePrefix("a/", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange("b", "d")));
EXPECT_THAT(KeyRange::RemovePrefix("a/b", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("a/d", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange::EmptyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("a/bc", KeyRange("a/b", "a/bcb")),
::testing::Eq(KeyRange("", "b")));
EXPECT_THAT(KeyRange::RemovePrefix("x", KeyRange("xa", "y")),
::testing::Eq(KeyRange("a", "")));
EXPECT_THAT(KeyRange::RemovePrefix("ab", KeyRange::Prefix("ab")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("ab", KeyRange::Prefix("ab\xff")),
::testing::Eq(KeyRange("\xff", "")));
}
TEST(KeyRangeTest, RemovePrefixLength) {
EXPECT_THAT(KeyRange::RemovePrefixLength(0, KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange("b", "d")));
EXPECT_THAT(KeyRange::RemovePrefixLength(3, KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefixLength(4, KeyRange("a/b", "a/bcb")),
::testing::Eq(KeyRange("", "b")));
EXPECT_THAT(KeyRange::RemovePrefixLength(1, KeyRange("xa", "y")),
::testing::Eq(KeyRange("a", "")));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange::Prefix("ab")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange::Prefix("ab\xff")),
::testing::Eq(KeyRange("\xff", "")));
}
TEST(KeyRangeTest, Singleton) {
auto r = KeyRange::Singleton("x");
EXPECT_TRUE(Contains(r, "x"));
EXPECT_FALSE(Contains(r, KeyRange::Successor("x")));
EXPECT_EQ(KeyRange("x", KeyRange::Successor("x")), r);
EXPECT_TRUE(KeyRange::Singleton("x").is_singleton());
EXPECT_FALSE(KeyRange::Singleton("y").full());
EXPECT_FALSE(KeyRange::Singleton("x").is_non_empty_prefix());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/key_range.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/key_range_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
29b5e76f-177d-4976-9b2a-49a08a665656 | cpp | google/tensorstore | byte_range | tensorstore/kvstore/byte_range.cc | tensorstore/kvstore/byte_range_test.cc | #include "tensorstore/kvstore/byte_range.h"
#include <cassert>
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const OptionalByteRangeRequest& r) {
os << "[" << r.inclusive_min << ", ";
if (r.exclusive_max != -1) {
os << r.exclusive_max;
} else {
os << "?";
}
os << ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const ByteRange& r) {
return os << "[" << r.inclusive_min << ", " << r.exclusive_max << ")";
}
Result<ByteRange> OptionalByteRangeRequest::Validate(int64_t size) const {
assert(SatisfiesInvariants());
int64_t inclusive_min = this->inclusive_min;
int64_t exclusive_max = this->exclusive_max;
if (exclusive_max == -1) exclusive_max = size;
if (inclusive_min < 0) {
inclusive_min += size;
}
if (inclusive_min < 0 || exclusive_max > size ||
inclusive_min > exclusive_max) {
return absl::OutOfRangeError(
tensorstore::StrCat("Requested byte range ", *this,
" is not valid for value of size ", size));
}
return ByteRange{inclusive_min, exclusive_max};
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::ByteRange, tensorstore::serialization::ApplyMembersSerializer<
tensorstore::ByteRange>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::OptionalByteRangeRequest,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::OptionalByteRangeRequest>()) | #include "tensorstore/kvstore/byte_range.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::ByteRange;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::StrCat;
using ::tensorstore::internal::GetSubCord;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(ByteRangeTest, SatisfiesInvariants) {
EXPECT_TRUE((ByteRange{0, 0}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 1}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{10, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{100, 100}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 99}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 0}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{-100, 0}).SatisfiesInvariants());
}
TEST(ByteRangeTest, Size) {
EXPECT_EQ(5, (ByteRange{2, 7}.size()));
EXPECT_EQ(0, (ByteRange{2, 2}.size()));
}
TEST(ByteRangeTest, Comparison) {
ByteRange a{1, 2};
ByteRange b{1, 3};
ByteRange c{2, 3};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_FALSE(a != a);
EXPECT_FALSE(b != b);
EXPECT_FALSE(c != c);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_NE(a, c);
EXPECT_NE(b, c);
}
TEST(ByteRangeTest, Ostream) {
EXPECT_EQ("[1, 10)", tensorstore::StrCat(ByteRange{1, 10}));
}
TEST(OptionalByteRangeRequestTest, DefaultConstruct) {
OptionalByteRangeRequest r;
EXPECT_EQ(0, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMin) {
OptionalByteRangeRequest r(5);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMinExclusiveMax) {
OptionalByteRangeRequest r(5, 10);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructByteRange) {
OptionalByteRangeRequest r(ByteRange{5, 10});
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, Comparison) {
OptionalByteRangeRequest a{1, 2};
OptionalByteRangeRequest b{1, 3};
OptionalByteRangeRequest c{2, 3};
OptionalByteRangeRequest d{1, -1};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_TRUE(d == d);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != c);
EXPECT_TRUE(a != d);
EXPECT_TRUE(b != d);
EXPECT_TRUE(c != d);
}
TEST(OptionalByteRangeRequestTest, SatisfiesInvariants) {
EXPECT_TRUE(OptionalByteRangeRequest().SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 1).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 0).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(100, 100).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 99).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 3).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(3, -2).SatisfiesInvariants());
}
TEST(OptionalByteRangeRequestTest, Ostream) {
EXPECT_EQ("[5, 10)", StrCat(OptionalByteRangeRequest(5, 10)));
EXPECT_EQ("[5, ?)", StrCat(OptionalByteRangeRequest(5)));
}
TEST(OptionalByteRangeRequestTest, Validate) {
EXPECT_THAT(OptionalByteRangeRequest().Validate(0),
::testing::Optional(ByteRange{0, 0}));
EXPECT_THAT(OptionalByteRangeRequest().Validate(1),
::testing::Optional(ByteRange{0, 1}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(20),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-3).Validate(10),
::testing::Optional(ByteRange{7, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-10).Validate(10),
::testing::Optional(ByteRange{0, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[5, 10\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(10, 15).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[10, 15\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(-10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[-10, \\?\\) is not valid for "
"value of size 9"));
}
TEST(GetSubStringTest, Basic) {
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("abcde", GetSubCord(absl::Cord("abcde"), {0, 5}));
}
TEST(ByteRangeSerializationTest, Basic) {
TestSerializationRoundTrip(ByteRange{1, 5});
}
TEST(OptionalByteRangeRequestSerializationTest, Basic) {
TestSerializationRoundTrip(OptionalByteRangeRequest{1, 5});
TestSerializationRoundTrip(OptionalByteRangeRequest{1});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/byte_range.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/byte_range_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a182fb05-afd0-428d-a2cb-989b5f45682f | cpp | google/tensorstore | generation | tensorstore/kvstore/generation.cc | tensorstore/kvstore/generation_test.cc | #include "tensorstore/kvstore/generation.h"
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <ostream>
#include <string_view>
#include <utility>
#include "absl/time/time.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
std::string_view CanonicalGeneration(std::string_view generation) {
size_t new_size = generation.size();
while (new_size && generation[new_size - 1] == 0) {
--new_size;
}
return generation.substr(0, new_size);
}
}
std::ostream& operator<<(std::ostream& os, const StorageGeneration& g) {
return os << QuoteString(g.value);
}
std::ostream& operator<<(std::ostream& os,
const TimestampedStorageGeneration& x) {
return os << "{generation=" << x.generation << ", time=" << x.time << "}";
}
bool StorageGeneration::Equivalent(std::string_view a, std::string_view b) {
return CanonicalGeneration(a) == CanonicalGeneration(b);
}
StorageGeneration StorageGeneration::Clean(StorageGeneration generation) {
size_t new_size = generation.value.size();
while (new_size) {
if (generation.value[new_size - 1] & kBaseGeneration) {
generation.value[new_size - 1] &= ~(kDirty | kNewlyDirty);
break;
}
--new_size;
}
generation.value.resize(new_size);
return generation;
}
void StorageGeneration::MarkDirty() {
if (value.empty()) {
value = (kDirty | kNewlyDirty);
} else {
value.back() |= (kDirty | kNewlyDirty);
}
}
StorageGeneration StorageGeneration::Dirty(StorageGeneration generation) {
if (generation.value.empty()) {
return StorageGeneration{std::string(1, kDirty)};
}
generation.value.back() |= kDirty;
return generation;
}
StorageGeneration StorageGeneration::FromUint64(uint64_t n) {
StorageGeneration generation;
generation.value.resize(9);
std::memcpy(generation.value.data(), &n, 8);
generation.value[8] = kBaseGeneration;
return generation;
}
StorageGeneration StorageGeneration::FromString(std::string_view s) {
StorageGeneration generation;
generation.value.reserve(s.size() + 1);
generation.value += s;
generation.value += kBaseGeneration;
return generation;
}
StorageGeneration StorageGeneration::Condition(
const StorageGeneration& generation, StorageGeneration condition) {
if (IsDirty(generation)) {
return Dirty(Clean(std::move(condition)));
}
return Clean(std::move(condition));
}
bool StorageGeneration::IsDirty(const StorageGeneration& generation) {
auto canonical = CanonicalGeneration(generation.value);
return !canonical.empty() && (canonical.back() & kDirty);
}
bool StorageGeneration::IsInnerLayerDirty(const StorageGeneration& generation) {
return !generation.value.empty() && (generation.value.back() & kDirty);
}
StorageGeneration StorageGeneration::AddLayer(StorageGeneration generation) {
generation.value.resize(generation.value.size() + 1);
return generation;
}
bool StorageGeneration::IsConditional(const StorageGeneration& generation) {
size_t new_size = generation.value.size();
while (new_size && !(generation.value[new_size - 1] & kBaseGeneration)) {
--new_size;
}
return (new_size != 0);
}
bool StorageGeneration::IsConditionalOn(const StorageGeneration& generation,
const StorageGeneration& condition) {
size_t size = generation.value.size();
return size != 0 && condition.value.size() == size &&
std::memcmp(generation.value.data(), condition.value.data(),
size - 1) == 0 &&
(generation.value[size] | kDirty | kNewlyDirty) ==
(condition.value[size] | kDirty | kNewlyDirty);
}
std::string_view StorageGeneration::DecodeString(
const StorageGeneration& generation) {
std::string_view s = generation.value;
if (s.empty()) return {};
while (true) {
bool start_of_tags = static_cast<bool>(s.back() & kBaseGeneration);
s.remove_suffix(1);
if (start_of_tags || s.empty()) break;
}
return s;
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::StorageGeneration,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::StorageGeneration>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::TimestampedStorageGeneration,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::TimestampedStorageGeneration>()) | #include "tensorstore/kvstore/generation.h"
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(StorageGenerationTest, Basic) {
EXPECT_TRUE(StorageGeneration::IsUnknown(StorageGeneration::Unknown()));
EXPECT_FALSE(StorageGeneration::IsUnknown(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsNoValue(StorageGeneration::Unknown()));
EXPECT_TRUE(StorageGeneration::IsNoValue(StorageGeneration::NoValue()));
EXPECT_EQ(StorageGeneration{std::string{StorageGeneration::kDirty}},
StorageGeneration::Dirty(StorageGeneration::Unknown()));
StorageGeneration gen{
std::string{1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration}};
StorageGeneration local_gen{std::string{
1, 2, 3, 4, 5,
StorageGeneration::kBaseGeneration | StorageGeneration::kDirty}};
EXPECT_FALSE(StorageGeneration::IsUnknown(gen));
EXPECT_FALSE(StorageGeneration::IsUnknown(local_gen));
EXPECT_TRUE(StorageGeneration::IsClean(gen));
EXPECT_FALSE(StorageGeneration::IsClean(local_gen));
EXPECT_FALSE(StorageGeneration::IsDirty(gen));
EXPECT_TRUE(StorageGeneration::IsDirty(local_gen));
EXPECT_EQ(local_gen, StorageGeneration::Dirty(gen));
EXPECT_EQ(gen, StorageGeneration::Clean(local_gen));
EXPECT_TRUE(StorageGeneration::IsClean(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsClean(StorageGeneration::Unknown()));
EXPECT_EQ(StorageGeneration::NoValue(),
StorageGeneration::Clean(StorageGeneration::NoValue()));
}
TEST(StorageGenerationTest, Uint64) {
auto g = StorageGeneration::FromUint64(12345);
EXPECT_TRUE(StorageGeneration::IsUint64(g));
EXPECT_EQ(12345, StorageGeneration::ToUint64(g));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Unknown()));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Invalid()));
}
TEST(StorageGenerationSerializationTest, Basic) {
TestSerializationRoundTrip(StorageGeneration::Unknown());
TestSerializationRoundTrip(StorageGeneration::FromUint64(12345));
}
TEST(TimestampedStorageGenerationSerializationTest, Basic) {
TestSerializationRoundTrip(TimestampedStorageGeneration(
StorageGeneration::FromUint64(12345), absl::InfinitePast()));
TestSerializationRoundTrip(TimestampedStorageGeneration(
StorageGeneration::FromUint64(12345), absl::InfiniteFuture()));
}
TEST(StorageGenerationTest, IsCleanValidValue) {
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::Unknown()));
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::NoValue()));
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::Invalid()));
EXPECT_TRUE(StorageGeneration::IsCleanValidValue(
StorageGeneration::FromString("abc")));
EXPECT_TRUE(
StorageGeneration::IsCleanValidValue(StorageGeneration::FromUint64(42)));
}
TEST(StorageGenerationTest, DecodeString) {
EXPECT_EQ("abc", StorageGeneration::DecodeString(
StorageGeneration::FromString("abc")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/generation.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/generation_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
81f8af8a-489b-4b14-9049-bb797e3be1da | cpp | google/tensorstore | aws_credentials_resource | tensorstore/kvstore/s3/aws_credentials_resource.cc | tensorstore/kvstore/s3/aws_credentials_resource_test.cc | #include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <stddef.h>
#include <cassert>
#include <memory>
#include <optional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
using Spec = ::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Spec;
using Resource =
::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Resource;
const internal::ContextResourceRegistration<AwsCredentialsResource>
aws_credentials_registration;
Result<Resource> AwsCredentialsResource::Create(
const Spec& spec, internal::ContextResourceCreationContext context) const {
if (spec.anonymous) {
return Resource{spec, nullptr};
}
auto result = GetAwsCredentialProvider(
spec.filename, spec.profile, spec.metadata_endpoint,
internal_http::GetDefaultHttpTransport());
if (!result.ok() && absl::IsNotFound(result.status())) {
return Resource{spec, nullptr};
}
TENSORSTORE_RETURN_IF_ERROR(result);
return Resource{spec, *std::move(result)};
}
Result<std::optional<AwsCredentials>>
AwsCredentialsResource::Resource::GetCredentials() {
if (!credential_provider_) return std::nullopt;
auto credential_result_ = credential_provider_->GetCredentials();
if (!credential_result_.ok() &&
absl::IsNotFound(credential_result_.status())) {
return std::nullopt;
}
return credential_result_;
}
namespace {
static constexpr auto kAnonymousBinder = jb::Object(jb::Member(
"anonymous", jb::Projection<&Spec::anonymous>(
jb::Validate([](const auto& options, bool* x) {
if (*x != true) {
return absl::InvalidArgumentError(
"\"anonymous\" must be true or not present in "
"\"aws_credentials\"");
}
return absl::OkStatus();
}))));
static constexpr auto kParameterBinder = jb::Object(
jb::OptionalMember("profile", jb::Projection<&Spec::profile>()),
jb::OptionalMember("filename", jb::Projection<&Spec::filename>()),
jb::OptionalMember("metadata_endpoint",
jb::Projection<&Spec::metadata_endpoint>()));
}
absl::Status AwsCredentialsResource::FromJsonImpl(
const JsonSerializationOptions& options, Spec* spec, ::nlohmann::json* j) {
if (auto* j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
j_obj && j_obj->find("anonymous") != j_obj->end()) {
return kAnonymousBinder(std::true_type{}, options, spec, j);
}
return kParameterBinder(std::true_type{}, options, spec, j);
}
absl::Status AwsCredentialsResource::ToJsonImpl(
const JsonSerializationOptions& options, const Spec* spec,
::nlohmann::json* j) {
if (spec->anonymous) {
return kAnonymousBinder(std::false_type{}, options, spec, j);
}
return kParameterBinder(std::false_type{}, options, spec, j);
}
}
} | #include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvstore_s3::AwsCredentialsResource;
namespace {
TEST(AwsCredentialsResourceTest, InvalidDirectSpec) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected non-null value, but received: null"));
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: 3"));
EXPECT_THAT(
Context::Resource<AwsCredentialsResource>::FromJson("anonymous"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid reference to \"aws_credentials\" resource: \"anonymous\""));
}
TEST(AwsCredentialsResourceTest, Default) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<AwsCredentialsResource>::FromJson("aws_credentials"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ExplicitDefault) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
::nlohmann::json::object_t()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"profile", "my_profile"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, "my_profile");
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidAnonymousSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"anonymous", true}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, true);
EXPECT_THAT(resource->GetCredentials(),
tensorstore::IsOkAndHolds(::testing::Eq(std::nullopt)));
}
TEST(AwsCredentialsResourceTest, InvalidSpecs) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson({
{"anonymous", true},
{"profile", "xyz"},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/aws_credentials_resource.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/aws_credentials_resource_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6b142648-b737-4464-859c-a7183799af5a | cpp | google/tensorstore | s3_request_builder | tensorstore/kvstore/s3/s3_request_builder.cc | tensorstore/kvstore/s3/s3_request_builder_test.cc | #ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include "tensorstore/kvstore/s3/s3_request_builder.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/time/time.h"
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include "tensorstore/internal/digest/sha256.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_uri_utils.h"
using ::tensorstore::internal::ParseGenericUri;
using ::tensorstore::internal::SHA256Digester;
using ::tensorstore::internal_http::HttpRequest;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
constexpr static size_t kHmacSize = 32;
void ComputeHmac(std::string_view key, std::string_view message,
unsigned char (&hmac)[kHmacSize]) {
unsigned int md_len = kHmacSize;
ABSL_CHECK(
HMAC(EVP_sha256(), reinterpret_cast<const unsigned char*>(key.data()),
key.size(), reinterpret_cast<const unsigned char*>(message.data()),
message.size(), hmac, &md_len) &&
md_len == kHmacSize);
}
void ComputeHmac(unsigned char (&key)[kHmacSize], std::string_view message,
unsigned char (&hmac)[kHmacSize]) {
unsigned int md_len = kHmacSize;
ABSL_CHECK(HMAC(EVP_sha256(), key, kHmacSize,
reinterpret_cast<const unsigned char*>(message.data()),
message.size(), hmac, &md_len) &&
md_len == kHmacSize);
}
std::string CanonicalRequest(
std::string_view method, std::string_view path, std::string_view query,
std::string_view payload_hash,
const std::vector<std::pair<std::string, std::string_view>>& headers) {
std::string canonical =
absl::StrCat(method, "\n", S3UriObjectKeyEncode(path), "\n", query, "\n");
std::vector<std::string_view> signed_headers;
signed_headers.reserve(headers.size());
for (auto& pair : headers) {
absl::StrAppend(&canonical, pair.first, ":", pair.second, "\n");
signed_headers.push_back(pair.first);
}
absl::StrAppend(&canonical, "\n", absl::StrJoin(signed_headers, ";"), "\n",
payload_hash);
return canonical;
}
std::string SigningString(std::string_view canonical_request,
const absl::Time& time, std::string_view scope) {
absl::TimeZone utc = absl::UTCTimeZone();
SHA256Digester sha256;
sha256.Write(canonical_request);
const auto digest = sha256.Digest();
auto digest_sv = std::string_view(reinterpret_cast<const char*>(&digest[0]),
digest.size());
return absl::StrFormat("AWS4-HMAC-SHA256\n%s\n%s\n%s",
absl::FormatTime("%Y%m%dT%H%M%SZ", time, utc), scope,
absl::BytesToHexString(digest_sv));
}
void GetSigningKey(std::string_view aws_secret_access_key,
std::string_view aws_region, const absl::Time& time,
unsigned char (&signing_key)[kHmacSize]) {
absl::TimeZone utc = absl::UTCTimeZone();
unsigned char date_key[kHmacSize];
unsigned char date_region_key[kHmacSize];
unsigned char date_region_service_key[kHmacSize];
ComputeHmac(absl::StrCat("AWS4", aws_secret_access_key),
absl::FormatTime("%Y%m%d", time, utc), date_key);
ComputeHmac(date_key, aws_region, date_region_key);
ComputeHmac(date_region_key, "s3", date_region_service_key);
ComputeHmac(date_region_service_key, "aws4_request", signing_key);
}
std::string AuthorizationHeader(
std::string_view access_key, std::string_view scope,
std::string_view signature_hex,
const std::vector<std::pair<std::string, std::string_view>>& headers) {
return absl::StrFormat(
"Authorization: AWS4-HMAC-SHA256 "
"Credential=%s/%s, "
"SignedHeaders=%s, "
"Signature=%s",
access_key, scope,
absl::StrJoin(headers, ";",
[](std::string* out, auto pair) {
absl::StrAppend(out, pair.first);
}),
signature_hex);
}
static constexpr char kAmzContentSha256Header[] = "x-amz-content-sha256: ";
static constexpr char kAmzSecurityTokenHeader[] = "x-amz-security-token: ";
static constexpr char kAmzRequesterPayerHeader[] =
"x-amz-requester-payer: requester";
}
S3RequestBuilder& S3RequestBuilder::MaybeAddRequesterPayer(
bool requester_payer) {
if (requester_payer) {
builder_.AddHeader(kAmzRequesterPayerHeader);
}
return *this;
}
HttpRequest S3RequestBuilder::BuildRequest(std::string_view host_header,
const AwsCredentials& credentials,
std::string_view aws_region,
std::string_view payload_sha256_hash,
const absl::Time& time) {
builder_.AddHostHeader(host_header);
builder_.AddHeader(
absl::StrCat(kAmzContentSha256Header, payload_sha256_hash));
builder_.AddHeader(absl::FormatTime("x-amz-date: %Y%m%dT%H%M%SZ", time,
absl::UTCTimeZone()));
std::stable_sort(std::begin(query_params_), std::end(query_params_));
for (const auto& [k, v] : query_params_) {
builder_.AddQueryParameter(k, v);
}
if (credentials.IsAnonymous()) {
return builder_.BuildRequest();
}
if (!credentials.session_token.empty()) {
builder_.AddHeader(
absl::StrCat(kAmzSecurityTokenHeader, credentials.session_token));
}
auto request = builder_.BuildRequest();
std::vector<std::pair<std::string, std::string_view>> signed_headers;
signed_headers.reserve(request.headers.size());
for (const auto& header_str : request.headers) {
std::string_view header = header_str;
auto pos = header.find(':');
assert(pos != std::string::npos);
auto key = absl::AsciiStrToLower(
absl::StripAsciiWhitespace(header.substr(0, pos)));
auto value = absl::StripAsciiWhitespace(header.substr(pos + 1));
signed_headers.push_back({std::move(key), std::move(value)});
}
std::stable_sort(std::begin(signed_headers), std::end(signed_headers));
auto parsed_uri = ParseGenericUri(request.url);
assert(!parsed_uri.path.empty());
std::string scope = absl::StrFormat(
"%s/%s/s3/aws4_request",
absl::FormatTime("%Y%m%d", time, absl::UTCTimeZone()), aws_region);
canonical_request_ =
CanonicalRequest(request.method, parsed_uri.path, parsed_uri.query,
payload_sha256_hash, signed_headers);
signing_string_ = SigningString(canonical_request_, time, scope);
unsigned char signing_key[kHmacSize];
GetSigningKey(credentials.secret_key, aws_region, time, signing_key);
unsigned char signature[kHmacSize];
ComputeHmac(signing_key, signing_string_, signature);
signature_ = absl::BytesToHexString(
std::string_view(reinterpret_cast<char*>(&signature[0]), kHmacSize));
std::string auth_header = AuthorizationHeader(credentials.access_key, scope,
signature_, signed_headers);
ABSL_LOG_IF(INFO, s3_logging.Level(1))
<< "Canonical Request\n"
<< canonical_request_
<< "\n\nSigning String\n"
<< signing_string_
<< "\n\nSigning Key\n"
<< absl::BytesToHexString(std::string_view(
reinterpret_cast<char*>(signing_key), kHmacSize))
<< "\n\nAuthorization Header\n"
<< auth_header;
request.headers.emplace_back(std::move(auth_header));
return request;
}
}
} | #include "tensorstore/kvstore/s3/s3_request_builder.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
using ::tensorstore::internal_kvstore_s3::AwsCredentials;
using ::tensorstore::internal_kvstore_s3::S3RequestBuilder;
namespace {
static const AwsCredentials credentials{
"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", ""};
static const absl::TimeZone utc = absl::UTCTimeZone();
static constexpr char aws_region[] = "us-east-1";
static constexpr char bucket[] = "examplebucket";
TEST(S3RequestBuilderTest, SignatureMethods) {
const auto now =
absl::FromCivil(absl::CivilSecond(2024, 2, 21, 03, 02, 05), utc);
auto builder =
S3RequestBuilder(
"PUT", "https:
.AddHeader("content-md5: 1B2M2Y8AsgTpgAmY7PhCfg==")
.AddHeader("content-type: text/plain");
auto request = builder.BuildRequest(
"bucket.s3.us-west-2.amazonaws.com", credentials, "us-west-2",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", now);
auto expected_canonical_request =
"PUT\n"
"/bucket/tensorstore/a-_.~%24%26%2C%3A%3D%40z/b/file.txt\n"
"\n"
"content-md5:1B2M2Y8AsgTpgAmY7PhCfg==\n"
"content-type:text/plain\n"
"host:bucket.s3.us-west-2.amazonaws.com\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20240221T030205Z\n"
"\n"
"content-md5;content-type;host;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20240221T030205Z\n"
"20240221/us-west-2/s3/aws4_request\n"
"28c393b04c83956e1d4056351030e34bffa3dd877cf6cf2d0c83d2114bef7940";
auto expected_signature =
"c3bf762eae82b8a87dc5f7af8c2ad8973d4a0132c49bd8c46d025d4a1aa175fb";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
}
TEST(S3RequestBuilderTest, AWS4SignatureGetExample) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url).AddHeader("range: bytes=0-9");
auto request = builder.BuildRequest(
"", credentials, aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"GET\n"
"/test.txt\n"
"\n"
"host:examplebucket.s3.amazonaws.com\n"
"range:bytes=0-9\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20130524T000000Z\n"
"\n"
"host;range;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972";
auto expected_signature =
"f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, "
"Signature="
"f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, url);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z", "range: bytes=0-9"));
}
TEST(S3RequestBuilderTest, AWS4SignaturePutExample) {
auto url = absl::StrFormat("s3:
auto builder = S3RequestBuilder("PUT", url)
.AddHeader("date: Fri, 24 May 2013 00:00:00 GMT")
.AddHeader("x-amz-storage-class: REDUCED_REDUNDANCY");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials, aws_region,
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"PUT\n"
"/test%24file.text\n"
"\n"
"date:Fri, 24 May 2013 00:00:00 GMT\n"
"host:examplebucket.s3.amazonaws.com\n"
"x-amz-content-sha256:"
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072\n"
"x-amz-date:20130524T000000Z\n"
"x-amz-storage-class:REDUCED_REDUNDANCY\n"
"\n"
"date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"9e0e90d9c76de8fa5b200d8c849cd5b8dc7a3be3951ddb7f6a76b4158342019d";
auto expected_signature =
"98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-"
"class, "
"Signature="
"98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, url);
EXPECT_EQ(request.headers.size(), 6);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "date: Fri, 24 May 2013 00:00:00 GMT",
"host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072",
"x-amz-date: 20130524T000000Z",
"x-amz-storage-class: REDUCED_REDUNDANCY"));
}
TEST(S3RequestBuilderTest, AWS4SignatureListObjectsExample) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url)
.AddQueryParameter("prefix", "J")
.AddQueryParameter("max-keys", "2");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials, aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"GET\n"
"/\n"
"max-keys=2&prefix=J\n"
"host:examplebucket.s3.amazonaws.com\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20130524T000000Z\n"
"\n"
"host;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7";
auto expected_signature =
"34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=host;x-amz-content-sha256;x-amz-date, "
"Signature="
"34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, absl::StrCat(url, "?max-keys=2&prefix=J"));
EXPECT_EQ(request.headers.size(), 4);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z"));
}
TEST(S3RequestBuilderTest, AnonymousCredentials) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url).AddQueryParameter("test", "this");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), AwsCredentials{},
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_EQ(request.url, absl::StrCat(url, "?test=this"));
EXPECT_EQ(request.headers.size(), 3);
EXPECT_THAT(request.headers, ::testing::Not(::testing::Contains(
::testing::HasSubstr("Authorization:"))));
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
"host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z"));
}
TEST(S3RequestBuilderTest, AwsSessionTokenHeaderAdded) {
auto token = "abcdef1234567890";
auto sts_credentials =
AwsCredentials{credentials.access_key, credentials.secret_key, token};
auto builder =
S3RequestBuilder("GET", absl::StrFormat("https:
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), sts_credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_EQ(request.headers.size(), 5);
EXPECT_THAT(request.headers,
::testing::Contains(::testing::HasSubstr("Authorization: ")));
EXPECT_THAT(request.headers, ::testing::Contains(absl::StrCat(
"x-amz-security-token: ", token)));
}
TEST(S3RequestBuilderTest, AwsRequesterPaysHeaderAdded) {
auto request =
S3RequestBuilder("GET", absl::StrFormat("https:
.MaybeAddRequesterPayer(false)
.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85"
"5",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_THAT(request.headers,
::testing::Not(::testing::Contains(
::testing::HasSubstr("x-amz-requester-payer"))));
request =
S3RequestBuilder("GET", absl::StrFormat("https:
.MaybeAddRequesterPayer(true)
.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85"
"5",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_THAT(request.headers,
::testing::Contains("x-amz-requester-payer: requester"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_request_builder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_request_builder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a8ed8874-de57-43b2-817a-53cedca9fb8e | cpp | google/tensorstore | validate | tensorstore/kvstore/gcs/validate.cc | tensorstore/kvstore/gcs/validate_test.cc | #include "tensorstore/kvstore/gcs/validate.h"
#include <iterator>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/utf8.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_storage_gcs {
/// Returns whether `bucket` is a syntactically valid GCS bucket name.
///
/// Enforced rules (mirroring the GCS bucket naming requirements visible in
/// the checks below):
///   - Overall length in [3, 222] bytes.
///   - First and last character are a lowercase ASCII letter or digit.
///   - The name is a sequence of non-empty dot-separated components, each at
///     most 63 bytes, not beginning or ending with '-', and consisting only
///     of lowercase letters, digits, '-' and '_'.
bool IsValidBucketName(std::string_view bucket) {
  // Overall length bound.
  if (bucket.size() < 3 || bucket.size() > 222) return false;
  // Locale-independent ASCII check, equivalent to
  // absl::ascii_islower/absl::ascii_isdigit.
  auto lower_or_digit = [](char c) {
    return (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9');
  };
  // The name must begin and end with a letter or number.
  if (!lower_or_digit(bucket.front()) || !lower_or_digit(bucket.back())) {
    return false;
  }
  // Walk each dot-separated component without materializing a split.
  size_t start = 0;
  while (start <= bucket.size()) {
    size_t dot = bucket.find('.', start);
    if (dot == std::string_view::npos) dot = bucket.size();
    std::string_view component = bucket.substr(start, dot - start);
    // Components must be non-empty and at most 63 bytes.
    if (component.empty() || component.size() > 63) return false;
    // Components may not start or end with a dash.
    if (component.front() == '-' || component.back() == '-') return false;
    for (char c : component) {
      if (!lower_or_digit(c) && c != '-' && c != '_') return false;
    }
    start = dot + 1;
  }
  return true;
}
/// Returns whether `name` is an acceptable GCS object name: 1-1024 bytes,
/// not "." or "..", not under the reserved ACME-challenge prefix, free of
/// control characters, and valid UTF-8.
bool IsValidObjectName(std::string_view name) {
  // Length must be in [1, 1024] bytes.
  if (name.empty() || name.size() > 1024) return false;
  // "." and ".." are reserved path components.
  if (name == "." || name == "..") return false;
  // GCS reserves the ACME-challenge prefix; 26 == strlen of the prefix.
  if (name.substr(0, 26) == ".well-known/acme-challenge") return false;
  // Reject CR/LF and all other control characters.
  for (char ch : name) {
    if (ch == '\r' || ch == '\n') return false;
    if (absl::ascii_iscntrl(ch)) return false;
  }
  // Finally, the name must be well-formed UTF-8.
  return internal::IsValidUtf8(name);
}
/// Returns whether `gen` can be used as a read/write condition for GCS:
/// either an unconditional marker (unknown / no-value) or a strictly
/// positive uint64 object generation.
bool IsValidStorageGeneration(const StorageGeneration& gen) {
  if (StorageGeneration::IsUnknown(gen)) return true;
  if (StorageGeneration::IsNoValue(gen)) return true;
  // Real GCS generations are positive 64-bit integers; 0 is not a valid
  // object generation.
  return StorageGeneration::IsUint64(gen) &&
         StorageGeneration::ToUint64(gen) > 0;
}
/// Converts an HTTP response from GCS into an `absl::Status`, additionally
/// reporting via `retryable` whether the failure is transient.
///
/// For non-OK responses, extracts the human-readable message from the
/// standard GCS JSON error envelope ({"error": {"message": ...}}) when
/// present, and attaches diagnostic payloads (status code, truncated body,
/// upload id) to the returned status.
absl::Status GcsHttpResponseToStatus(
    const internal_http::HttpResponse& response, bool& retryable,
    SourceLocation loc) {
  auto absl_status_code = HttpResponseCodeToStatusCode(response);
  if (absl_status_code == absl::StatusCode::kOk) {
    return absl::OkStatus();
  }
  // 429 Too Many Requests, 408 Request Timeout, and all 5xx server errors
  // are treated as transient and safe to retry.
  retryable = (response.status_code == 429 ||
               response.status_code == 408 ||
               response.status_code >= 500
  );
  std::string error_message;
  auto payload = response.payload;
  auto payload_str = payload.Flatten();
  // Prefer the message embedded in the GCS JSON error body, if any.
  if (auto j_obj = internal::ParseJson(payload_str); j_obj.is_object()) {
    if (auto j_error = internal_json::JsonExtractMember(
            j_obj.template get_ptr<::nlohmann::json::object_t*>(), "error");
        j_error.is_object()) {
      if (auto j_message = internal_json::JsonExtractMember(
              j_error.template get_ptr<::nlohmann::json::object_t*>(),
              "message");
          j_message.is_string()) {
        error_message = j_message.template get<std::string>();
      }
    }
  }
  // Fall back to the generic HTTP reason phrase, then to "Unknown".
  if (error_message.empty()) {
    error_message = HttpResponseCodeToMessage(response);
    if (error_message.empty()) {
      error_message = "Unknown";
    }
  }
  absl::Status status(absl_status_code, error_message);
  status.SetPayload("http_response_code",
                    absl::Cord(absl::StrFormat("%d", response.status_code)));
  // Attach at most the first 256 bytes of the response body for diagnostics.
  if (!payload_str.empty()) {
    status.SetPayload(
        "http_response_body",
        payload.Subcord(0,
                        payload_str.size() < 256 ? payload_str.size() : 256));
  }
  // The GCS upload id helps correlate failures with server-side logs.
  if (auto id_header = response.headers.find("x-guploader-uploadid");
      id_header != response.headers.end()) {
    status.SetPayload("x-guploader-uploadid", absl::Cord(id_header->second));
  }
  MaybeAddSourceLocation(status, loc);
  return status;
}
}
} | #include "tensorstore/kvstore/gcs/validate.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
// Exercises bucket-name validation: length bounds, allowed characters, and
// per-component (dot-separated) constraints.
TEST(ValidateTest, IsValidBucketName) {
  // Simple valid names.
  EXPECT_TRUE(IsValidBucketName("foo"));
  EXPECT_TRUE(IsValidBucketName("a.b"));
  EXPECT_TRUE(IsValidBucketName("a-b"));
  EXPECT_TRUE(IsValidBucketName("1.2.3.4"));
  // '_' is not permitted as the first or last character of the name.
  EXPECT_FALSE(IsValidBucketName("_abc"));
  EXPECT_FALSE(IsValidBucketName("abc_"));
  // A dot-free component may not exceed 63 characters.
  EXPECT_FALSE(
      IsValidBucketName("1234567890b123456789012345678901234567890"
                        "1234567890b123456789012345678901234567890"
                        "abcd"));
  // '_' is allowed inside and at component boundaries.
  EXPECT_TRUE(IsValidBucketName("a._b"));
  EXPECT_TRUE(IsValidBucketName("a_.b"));
  // Too short or invalid first/last characters.
  EXPECT_FALSE(IsValidBucketName("."));
  EXPECT_FALSE(IsValidBucketName(".."));
  EXPECT_FALSE(IsValidBucketName("aa"));
  EXPECT_FALSE(IsValidBucketName("_foo"));
  EXPECT_FALSE(IsValidBucketName("foo_"));
  // Empty components and '-' adjacent to '.' are rejected.
  EXPECT_FALSE(IsValidBucketName("a..b"));
  EXPECT_FALSE(IsValidBucketName("a.-b"));
  EXPECT_FALSE(IsValidBucketName("a-.b"));
  // Component length is enforced even when followed by another component.
  EXPECT_FALSE(
      IsValidBucketName("1234567890b123456789012345678901234567890"
                        "1234567890b123456789012345678901234567890"
                        "abcd.b"));
}
// Exercises object-name validation: length, reserved names, control
// characters, and UTF-8 well-formedness.
TEST(ValidateTest, IsValidObjectName) {
  // Ordinary names, including path-like separators, are accepted.
  EXPECT_TRUE(IsValidObjectName("foo"));
  EXPECT_TRUE(IsValidObjectName("foo.bar"));
  EXPECT_TRUE(IsValidObjectName("foo/bar\\baz"));
  // Empty and reserved names are rejected.
  EXPECT_FALSE(IsValidObjectName(""));
  EXPECT_FALSE(IsValidObjectName("."));
  EXPECT_FALSE(IsValidObjectName(".."));
  EXPECT_FALSE(IsValidObjectName(".well-known/acme-challenge"));
  // CR/LF are rejected.
  EXPECT_FALSE(IsValidObjectName("foo\rbar"));
  EXPECT_FALSE(IsValidObjectName("foo\nbar"));
  // Shell/glob metacharacters are fine.
  EXPECT_TRUE(IsValidObjectName("foo[*?#]"));
  // Other control characters are rejected.
  EXPECT_FALSE(IsValidObjectName("foo\004bar"));
  EXPECT_FALSE(IsValidObjectName("foo\tbar"));
  // Invalid UTF-8 sequences are rejected.
  EXPECT_FALSE(IsValidObjectName("\xfe\xfe\xff\xff"));
  EXPECT_FALSE(IsValidObjectName("\xfc\x80\x80\x80\x80\xaf"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs/validate.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs/validate_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0df8f891-2efd-4dd7-ba09-7e8edd2d97e0 | cpp | google/tensorstore | s3_key_value_store | tensorstore/kvstore/s3/s3_key_value_store.cc | tensorstore/kvstore/s3/s3_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/digest/sha256.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/http/byte_range_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_endpoint.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/kvstore/s3/s3_request_builder.h"
#include "tensorstore/kvstore/s3/s3_resource.h"
#include "tensorstore/kvstore/s3/s3_uri_utils.h"
#include "tensorstore/kvstore/s3/validate.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tinyxml2.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal::SHA256Digester;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_kvstore_s3::AwsCredentials;
using ::tensorstore::internal_kvstore_s3::AwsCredentialsResource;
using ::tensorstore::internal_kvstore_s3::AwsHttpResponseToStatus;
using ::tensorstore::internal_kvstore_s3::GetNodeInt;
using ::tensorstore::internal_kvstore_s3::GetNodeText;
using ::tensorstore::internal_kvstore_s3::IsValidBucketName;
using ::tensorstore::internal_kvstore_s3::IsValidObjectName;
using ::tensorstore::internal_kvstore_s3::IsValidStorageGeneration;
using ::tensorstore::internal_kvstore_s3::S3ConcurrencyResource;
using ::tensorstore::internal_kvstore_s3::S3EndpointRegion;
using ::tensorstore::internal_kvstore_s3::S3RateLimiterResource;
using ::tensorstore::internal_kvstore_s3::S3RequestBuilder;
using ::tensorstore::internal_kvstore_s3::S3RequestRetries;
using ::tensorstore::internal_kvstore_s3::S3UriEncode;
using ::tensorstore::internal_kvstore_s3::StorageGenerationFromHeaders;
using ::tensorstore::kvstore::Key;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListOptions;
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Metrics for the S3 driver: the common kvstore metrics plus a counter of
// retried requests.
struct S3Metrics : public internal_kvstore::CommonMetrics {
  internal_metrics::Counter<int64_t>& retries;
};
// Registers the metric set once at static-initialization time.
auto s3_metrics = []() -> S3Metrics {
  return {
      TENSORSTORE_KVSTORE_COMMON_METRICS(s3),
      TENSORSTORE_KVSTORE_COUNTER_IMPL(
          s3, retries, "count of all retried requests (read/write/delete)")};
}();
// Verbose-logging flag for this driver.
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
// URL scheme handled by this driver ("s3://...").
static constexpr char kUriScheme[] = "s3";
// SHA-256 of the empty string, used when signing requests with no body.
static constexpr char kEmptySha256[] =
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
// ETag used to encode StorageGeneration::NoValue() in conditional headers.
static constexpr char kEmptyEtag[] = "\"\"";
// S3 rejects single PUT uploads larger than 5 GiB.
static constexpr size_t kMaxS3PutSize = size_t{5} * 1024 * 1024 * 1024;
/// Adds a conditional header (e.g. "if-match"/"if-none-match") encoding the
/// ETag represented by `gen`.  Returns false (adding nothing) when `gen` is
/// the unconditional "unknown" generation.
bool AddGenerationHeader(S3RequestBuilder* builder, std::string_view header,
                         const StorageGeneration& gen) {
  // Unconditional requests carry no generation header.
  if (StorageGeneration::IsUnknown(gen)) {
    return false;
  }
  // "No value" is expressed as the empty ETag; otherwise use the ETag
  // recorded inside the generation.
  std::string etag;
  if (StorageGeneration::IsNoValue(gen)) {
    etag = kEmptyEtag;
  } else {
    etag = StorageGeneration::DecodeString(gen);
  }
  builder->AddHeader(absl::StrCat(header, ": ", etag));
  return true;
}
/// Returns the lowercase hex SHA-256 of `cord` (defaults to the hash of the
/// empty payload), as required for SigV4 request signing.
std::string payload_sha256(const absl::Cord& cord = absl::Cord()) {
  SHA256Digester hasher;
  hasher.Write(cord);
  const auto raw_digest = hasher.Digest();
  // Hex-encode the raw digest bytes.
  std::string_view digest_bytes(
      reinterpret_cast<const char*>(&raw_digest[0]), raw_digest.size());
  return absl::BytesToHexString(digest_bytes);
}
/// Returns whether a transport-level `absl::Status` code represents a
/// transient failure worth retrying.
bool DefaultIsRetryableCode(absl::StatusCode code) {
  switch (code) {
    case absl::StatusCode::kDeadlineExceeded:
    case absl::StatusCode::kUnavailable:
      return true;
    default:
      return false;
  }
}
/// Bound spec data for the S3 driver, serialized to/from the driver's JSON
/// spec.  `ApplyMembers` and `default_json_binder` must stay in sync with
/// the member list.
struct S3KeyValueStoreSpecData {
  std::string bucket;
  bool requester_pays;
  std::optional<std::string> endpoint;
  std::optional<std::string> host_header;
  std::string aws_region;
  // Context resources: credentials, concurrency/rate limits, retry policy,
  // and the executor used for data copies.
  Context::Resource<AwsCredentialsResource> aws_credentials;
  Context::Resource<S3ConcurrencyResource> request_concurrency;
  std::optional<Context::Resource<S3RateLimiterResource>> rate_limiter;
  Context::Resource<S3RequestRetries> retries;
  Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
  // Must enumerate every member above (used for serialization/cache keys).
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.bucket, x.requester_pays, x.endpoint, x.host_header,
             x.aws_region, x.aws_credentials, x.request_concurrency,
             x.rate_limiter, x.retries, x.data_copy_concurrency);
  };
  constexpr static auto default_json_binder = jb::Object(
      // "bucket" is required and must satisfy the S3 naming rules.
      jb::Member("bucket",
                 jb::Projection<&S3KeyValueStoreSpecData::bucket>(jb::Validate(
                     [](const auto& options, const std::string* x) {
                       if (!IsValidBucketName(*x)) {
                         return absl::InvalidArgumentError(tensorstore::StrCat(
                             "Invalid S3 bucket name: ", QuoteString(*x)));
                       }
                       return absl::OkStatus();
                     }))),
      // Defaults to false (caller does not pay transfer costs).
      jb::Member("requester_pays",
                 jb::Projection<&S3KeyValueStoreSpecData::requester_pays>(
                     jb::DefaultValue([](auto* v) { *v = false; }))),
      jb::Member("host_header",
                 jb::Projection<&S3KeyValueStoreSpecData::host_header>()),
      jb::Member("endpoint",
                 jb::Projection<&S3KeyValueStoreSpecData::endpoint>()),
      // Empty region (the default) is resolved later; see
      // S3KeyValueStore::MaybeResolveRegion().
      jb::Member("aws_region",
                 jb::Projection<&S3KeyValueStoreSpecData::aws_region>(
                     jb::DefaultValue([](auto* v) { *v = ""; }))),
      jb::Member(AwsCredentialsResource::id,
                 jb::Projection<&S3KeyValueStoreSpecData::aws_credentials>()),
      jb::Member(
          S3ConcurrencyResource::id,
          jb::Projection<&S3KeyValueStoreSpecData::request_concurrency>()),
      jb::Member(S3RateLimiterResource::id,
                 jb::Projection<&S3KeyValueStoreSpecData::rate_limiter>()),
      jb::Member(S3RequestRetries::id,
                 jb::Projection<&S3KeyValueStoreSpecData::retries>()),
      jb::Member(DataCopyConcurrencyResource::id,
                 jb::Projection<
                     &S3KeyValueStoreSpecData::data_copy_concurrency>())
  );
};
std::string GetS3Url(std::string_view bucket, std::string_view path) {
return tensorstore::StrCat(kUriScheme, ":
}
/// Registered driver spec for "s3".  `DoOpen` is declared here and defined
/// elsewhere in this file; `ToUrl` reconstructs the canonical s3:// URL.
class S3KeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<S3KeyValueStoreSpec,
                                                    S3KeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "s3";
  Future<kvstore::DriverPtr> DoOpen() const override;
  Result<std::string> ToUrl(std::string_view path) const override {
    return GetS3Url(data_.bucket, path);
  }
};
/// The S3 kvstore driver.  All HTTP requests and retry backoff run
/// asynchronously on the data-copy executor.
class S3KeyValueStore
    : public internal_kvstore::RegisteredDriver<S3KeyValueStore,
                                                S3KeyValueStoreSpec> {
 public:
  S3KeyValueStore(std::shared_ptr<HttpTransport> transport,
                  S3KeyValueStoreSpecData spec)
      : transport_(std::move(transport)),
        spec_(std::move(spec)),
        host_header_(spec_.host_header.value_or(std::string())) {}
  // Reads within 4095 extra bytes of each other may be merged, up to a
  // 128 MiB coalesced request.
  internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
      const {
    internal_kvstore_batch::CoalescingOptions options;
    options.max_extra_read_bytes = 4095;
    options.target_coalesced_size = 128 * 1024 * 1024;
    return options;
  }
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  // Performs one (possibly coalesced) read; invoked via the batch machinery.
  Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;
  Future<const void> DeleteRange(KeyRange range) override;
  absl::Status GetBoundSpecData(SpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }
  const Executor& executor() const {
    return spec_.data_copy_concurrency->executor;
  }
  // Rate limiters fall back to a no-op limiter when none is configured.
  RateLimiter& read_rate_limiter() {
    if (spec_.rate_limiter.has_value()) {
      return *(spec_.rate_limiter.value()->read_limiter);
    }
    return no_rate_limiter_;
  }
  RateLimiter& write_rate_limiter() {
    if (spec_.rate_limiter.has_value()) {
      return *(spec_.rate_limiter.value()->write_limiter);
    }
    return no_rate_limiter_;
  }
  RateLimiter& admission_queue() { return *spec_.request_concurrency->queue; }
  // Returns credentials, or nullopt for anonymous access.
  Result<std::optional<AwsCredentials>> GetCredentials() {
    return spec_.aws_credentials->GetCredentials();
  }
  // Resolves the endpoint and AWS region for the configured bucket;
  // presumably cached in resolve_ehr_ (defined later in this file).
  Future<const S3EndpointRegion> MaybeResolveRegion();
  // Schedules `task->Retry()` after the retry policy's backoff for
  // `attempt`, or returns a non-OK (kAborted) status when the attempt
  // budget is exhausted.
  template <typename Task>
  absl::Status BackoffForAttemptAsync(
      absl::Status status, int attempt, Task* task,
      SourceLocation loc = ::tensorstore::SourceLocation::current()) {
    assert(task != nullptr);
    auto delay = spec_.retries->BackoffForAttempt(attempt);
    if (!delay) {
      return MaybeAnnotateStatus(std::move(status),
                                 absl::StrFormat("All %d retry attempts failed",
                                                 spec_.retries->max_retries),
                                 absl::StatusCode::kAborted, loc);
    }
    s3_metrics.retries.Increment();
    // The task keeps itself alive via the captured IntrusivePtr until the
    // scheduled retry runs.
    ScheduleAt(absl::Now() + *delay,
               WithExecutor(executor(), [task = IntrusivePtr<Task>(task)] {
                 task->Retry();
               }));
    return absl::OkStatus();
  }
  internal::NoRateLimiter no_rate_limiter_;
  std::shared_ptr<HttpTransport> transport_;
  S3KeyValueStoreSpecData spec_;
  std::string host_header_;
  // NOTE(review): presumably guards resolve_ehr_ initialization — confirm
  // against MaybeResolveRegion() (defined outside this chunk).
  absl::Mutex mutex_;
  Future<const S3EndpointRegion> resolve_ehr_;
};
/// Asynchronous state machine for a single GET/HEAD read.
///
/// Lifecycle: admitted through the read rate limiter and the global
/// admission queue (Start -> Admit), then Retry() issues the signed HTTP
/// request; OnResponse() either completes the promise or schedules another
/// attempt via BackoffForAttemptAsync.
struct ReadTask : public RateLimiterNode,
                  public internal::AtomicReferenceCount<ReadTask> {
  IntrusivePtr<S3KeyValueStore> owner;
  std::string object_name;
  kvstore::ReadOptions options;
  Promise<kvstore::ReadResult> promise;
  std::string read_url_;
  ReadyFuture<const S3EndpointRegion> endpoint_region_;
  int attempt_ = 0;
  absl::Time start_time_;
  ReadTask(IntrusivePtr<S3KeyValueStore> owner, std::string object_name,
           kvstore::ReadOptions options, Promise<kvstore::ReadResult> promise)
      : owner(std::move(owner)),
        object_name(std::move(object_name)),
        options(std::move(options)),
        promise(std::move(promise)) {}
  ~ReadTask() { owner->admission_queue().Finish(this); }
  // Rate-limiter callback; `task` is the raw pointer registered by the
  // caller (see ReadImpl).
  static void Start(void* task) {
    auto* self = reinterpret_cast<ReadTask*>(task);
    self->owner->read_rate_limiter().Finish(self);
    self->owner->admission_queue().Admit(self, &ReadTask::Admit);
  }
  // Admission-queue callback: adopts the reference taken in ReadImpl and
  // runs the request on the executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<ReadTask*>(task);
    self->owner->executor()(
        [state = IntrusivePtr<ReadTask>(self, internal::adopt_object_ref)] {
          state->Retry();
        });
  }
  // Issues (or re-issues) the signed HTTP request.
  void Retry() {
    if (!promise.result_needed()) {
      return;
    }
    AwsCredentials credentials;
    if (auto maybe_credentials = owner->GetCredentials();
        !maybe_credentials.ok()) {
      promise.SetResult(maybe_credentials.status());
      return;
    } else if (maybe_credentials.value().has_value()) {
      credentials = std::move(*maybe_credentials.value());
    }
    // A zero-length byte range is a stat-only request; use HEAD.
    auto request_builder = S3RequestBuilder(
        options.byte_range.size() == 0 ? "HEAD" : "GET", read_url_);
    AddGenerationHeader(&request_builder, "if-none-match",
                        options.generation_conditions.if_not_equal);
    AddGenerationHeader(&request_builder, "if-match",
                        options.generation_conditions.if_equal);
    if (options.byte_range.size() != 0) {
      request_builder.MaybeAddRangeHeader(options.byte_range);
    }
    const auto& ehr = endpoint_region_.value();
    start_time_ = absl::Now();
    auto request = request_builder.EnableAcceptEncoding()
                       .MaybeAddRequesterPayer(owner->spec_.requester_pays)
                       .BuildRequest(owner->host_header_, credentials,
                                     ehr.aws_region, kEmptySha256, start_time_);
    ABSL_LOG_IF(INFO, s3_logging) << "ReadTask: " << request;
    auto future = owner->transport_->IssueRequest(request, {});
    future.ExecuteWhenReady([self = IntrusivePtr<ReadTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
        << "ReadTask " << *response;
    bool is_retryable = false;
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) {
        is_retryable = DefaultIsRetryableCode(response.status().code());
        return response.status();
      }
      // 412/404/304 are meaningful outcomes interpreted by FinishResponse,
      // not errors.
      switch (response.value().status_code) {
        case 412:
        case 404:
        case 304:
          return absl::OkStatus();
      }
      return AwsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        // A retry has been scheduled; this task stays alive via the
        // scheduled callback.
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
    } else {
      promise.SetResult(FinishResponse(response.value()));
    }
  }
  // Maps a successful (or conditionally-failed) HTTP response to a
  // kvstore::ReadResult.
  Result<kvstore::ReadResult> FinishResponse(const HttpResponse& httpresponse) {
    s3_metrics.bytes_read.IncrementBy(httpresponse.payload.size());
    auto latency = absl::Now() - start_time_;
    s3_metrics.read_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
    switch (httpresponse.status_code) {
      case 204:
      case 404:
        // Object not present.
        return kvstore::ReadResult::Missing(start_time_);
      case 412:
        // "if-match" condition failed.
        return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
            StorageGeneration::Unknown(), start_time_});
      case 304:
        // "if-none-match" condition failed: value unchanged.
        return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
            options.generation_conditions.if_not_equal, start_time_});
    }
    absl::Cord value;
    if (options.byte_range.size() != 0) {
      // Verify the server honored the requested byte range and extract it.
      ByteRange byte_range;
      int64_t total_size;
      TENSORSTORE_RETURN_IF_ERROR(internal_http::ValidateResponseByteRange(
          httpresponse, options.byte_range, value, byte_range, total_size));
    }
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto generation, StorageGenerationFromHeaders(httpresponse.headers));
    return kvstore::ReadResult::Value(
        std::move(value),
        TimestampedStorageGeneration{std::move(generation), start_time_});
  }
};
/// Validates the key and generation conditions, then routes the read
/// through the batch machinery so adjacent byte ranges can be coalesced.
Future<kvstore::ReadResult> S3KeyValueStore::Read(Key key,
                                                  ReadOptions options) {
  s3_metrics.read.Increment();
  // Reject malformed input before issuing any network traffic.
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid S3 object name");
  }
  const bool conditions_ok =
      IsValidStorageGeneration(options.generation_conditions.if_equal) &&
      IsValidStorageGeneration(options.generation_conditions.if_not_equal);
  if (!conditions_ok) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
      *this, std::move(key), std::move(options));
}
/// Performs one (possibly coalesced) read: resolves the endpoint/region
/// first, then hands off to a ReadTask admitted through the read rate
/// limiter.
Future<kvstore::ReadResult> S3KeyValueStore::ReadImpl(Key&& key,
                                                      ReadOptions&& options) {
  s3_metrics.batch_read.Increment();
  auto op = PromiseFuturePair<ReadResult>::Make();
  auto state = internal::MakeIntrusivePtr<ReadTask>(
      internal::IntrusivePtr<S3KeyValueStore>(this), key, std::move(options),
      std::move(op.promise));
  MaybeResolveRegion().ExecuteWhenReady(
      [state = std::move(state)](ReadyFuture<const S3EndpointRegion> ready) {
        if (!ready.status().ok()) {
          state->promise.SetResult(ready.status());
          return;
        }
        // Full object URL: "<endpoint>/<key>".
        state->read_url_ = tensorstore::StrCat(ready.value().endpoint, "/",
                                               state->object_name);
        state->endpoint_region_ = std::move(ready);
        // The rate limiter holds a raw pointer; this reference is re-adopted
        // in ReadTask::Admit.
        intrusive_ptr_increment(state.get());
        state->owner->read_rate_limiter().Admit(state.get(), &ReadTask::Start);
      });
  return std::move(op.future);
}
/// CRTP base shared by WriteTask and DeleteTask.
///
/// Handles rate-limiter/admission-queue plumbing, credential refresh, and
/// the optional conditional HEAD "peek" used to evaluate `if_equal` before
/// mutating the object.  `Base` must provide: IsCancelled(), Fail(status),
/// AfterHeadRequest(), and OnHeadResponse(result).
template <typename Base>
struct ConditionTask : public RateLimiterNode,
                       public internal::AtomicReferenceCount<Base> {
  using Self = ConditionTask<Base>;
  IntrusivePtr<S3KeyValueStore> owner;
  kvstore::WriteOptions options_;
  ReadyFuture<const S3EndpointRegion> endpoint_region_;
  std::string object_url_;
  AwsCredentials credentials_;
  ConditionTask(IntrusivePtr<S3KeyValueStore> owner,
                kvstore::WriteOptions options,
                ReadyFuture<const S3EndpointRegion> endpoint_region,
                std::string object_url)
      : owner(std::move(owner)),
        options_(std::move(options)),
        endpoint_region_(std::move(endpoint_region)),
        object_url_(std::move(object_url)) {}
  // Rate-limiter callback registered by S3KeyValueStore::Write.
  static void Start(void* task) {
    auto* self = reinterpret_cast<Base*>(task);
    self->owner->write_rate_limiter().Finish(self);
    self->owner->admission_queue().Admit(self, &Base::Admit);
  }
  // Admission-queue callback: adopts the reference taken by the caller and
  // runs on the executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<Base*>(task);
    self->owner->executor()(
        [state = IntrusivePtr<Base>(self, internal::adopt_object_ref)] {
          state->Retry();
        });
  }
  // Refreshes credentials, then either issues the conditional HEAD request
  // or proceeds directly to the mutation when no condition was specified.
  void Retry() {
    if (static_cast<Base*>(this)->IsCancelled()) {
      return;
    }
    if (auto maybe_credentials = owner->GetCredentials();
        !maybe_credentials.ok()) {
      static_cast<Base*>(this)->Fail(maybe_credentials.status());
      return;
    } else if (maybe_credentials.value().has_value()) {
      credentials_ = std::move(*maybe_credentials.value());
    }
    if (StorageGeneration::IsUnknown(options_.generation_conditions.if_equal)) {
      // Unconditional mutation: skip the peek.
      static_cast<Base*>(this)->AfterHeadRequest();
      return;
    }
    auto builder = S3RequestBuilder("HEAD", object_url_);
    AddGenerationHeader(&builder, "if-match",
                        options_.generation_conditions.if_equal);
    auto now = absl::Now();
    const auto& ehr = endpoint_region_.value();
    auto request = builder.MaybeAddRequesterPayer(owner->spec_.requester_pays)
                       .BuildRequest(owner->host_header_, credentials_,
                                     ehr.aws_region, kEmptySha256, now);
    ABSL_LOG_IF(INFO, s3_logging) << "Peek: " << request;
    auto future = owner->transport_->IssueRequest(request, {});
    future.ExecuteWhenReady([self = IntrusivePtr<Base>(static_cast<Base*>(
                                this))](ReadyFuture<HttpResponse> response) {
      ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.result().ok())
          << "Peek (Response): " << response.value();
      if (self->IsCancelled()) return;
      self->OnHeadResponse(response.result());
    });
  }
};
/// State machine for a (possibly conditional) PUT of `value_`.
struct WriteTask : public ConditionTask<WriteTask> {
  using Base = ConditionTask<WriteTask>;
  absl::Cord value_;
  Promise<TimestampedStorageGeneration> promise;
  int attempt_ = 0;
  absl::Time start_time_;
  WriteTask(IntrusivePtr<S3KeyValueStore> o, kvstore::WriteOptions options,
            ReadyFuture<const S3EndpointRegion> endpoint_region,
            std::string object_url, absl::Cord value,
            Promise<TimestampedStorageGeneration> promise)
      : Base(std::move(o), std::move(options), std::move(endpoint_region),
             std::move(object_url)),
        value_(std::move(value)),
        promise(std::move(promise)) {}
  ~WriteTask() { owner->admission_queue().Finish(this); }
  bool IsCancelled() { return !promise.result_needed(); }
  void Fail(absl::Status status) { promise.SetResult(std::move(status)); }
  // Result of the conditional HEAD "peek" issued by ConditionTask::Retry.
  void OnHeadResponse(const Result<HttpResponse>& response) {
    if (!response.ok()) {
      Fail(response.status());
      return;
    }
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    switch (response.value().status_code) {
      case 304:
        // Not modified implies a failed "if-match" precondition here.
        [[fallthrough]];
      case 412:
        // Precondition failed: report an unknown generation (no write).
        r.generation = StorageGeneration::Unknown();
        promise.SetResult(r);
        return;
      case 404:
        // Object absent: only acceptable when the condition matches
        // "no value".
        if (!options_.generation_conditions.MatchesNoValue()) {
          r.generation = StorageGeneration::Unknown();
          promise.SetResult(r);
          return;
        }
        break;
      default:
        break;
    }
    AfterHeadRequest();
  }
  // Issues the actual PUT, signed with the SHA-256 of the payload.
  void AfterHeadRequest() {
    start_time_ = absl::Now();
    auto content_sha256 = payload_sha256(value_);
    const auto& ehr = endpoint_region_.value();
    auto request =
        S3RequestBuilder("PUT", object_url_)
            .AddHeader("Content-Type: application/octet-stream")
            .AddHeader(absl::StrCat("Content-Length: ", value_.size()))
            .MaybeAddRequesterPayer(owner->spec_.requester_pays)
            .BuildRequest(owner->host_header_, credentials_, ehr.aws_region,
                          content_sha256, start_time_);
    ABSL_LOG_IF(INFO, s3_logging)
        << "WriteTask: " << request << " size=" << value_.size();
    auto future = owner->transport_->IssueRequest(
        request, internal_http::IssueRequestOptions(value_));
    future.ExecuteWhenReady([self = IntrusivePtr<WriteTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
        << "WriteTask " << *response;
    bool is_retryable = false;
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) {
        is_retryable = DefaultIsRetryableCode(response.status().code());
        return response.status();
      }
      return AwsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        // A retry has been scheduled.
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
      return;
    }
    promise.SetResult(FinishResponse(response.value()));
  }
  // Converts the PUT response into a timestamped generation.
  Result<TimestampedStorageGeneration> FinishResponse(
      const HttpResponse& response) {
    TimestampedStorageGeneration r;
    r.time = start_time_;
    switch (response.status_code) {
      case 404:
        // A conditional write observed the object vanish; report an unknown
        // generation so the caller sees a failed precondition.
        if (!StorageGeneration::IsUnknown(
                options_.generation_conditions.if_equal)) {
          r.generation = StorageGeneration::Unknown();
          return r;
        }
    }
    auto latency = absl::Now() - start_time_;
    s3_metrics.write_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
    s3_metrics.bytes_written.IncrementBy(value_.size());
    TENSORSTORE_ASSIGN_OR_RETURN(
        r.generation, StorageGenerationFromHeaders(response.headers));
    return r;
  }
};
/// State machine for a (possibly conditional) DELETE of an object.
struct DeleteTask : public ConditionTask<DeleteTask> {
  using Base = ConditionTask<DeleteTask>;
  Promise<TimestampedStorageGeneration> promise;
  int attempt_ = 0;
  absl::Time start_time_;
  DeleteTask(IntrusivePtr<S3KeyValueStore> o, kvstore::WriteOptions options,
             ReadyFuture<const S3EndpointRegion> endpoint_region,
             std::string object_url,
             Promise<TimestampedStorageGeneration> promise)
      : Base(std::move(o), std::move(options), std::move(endpoint_region),
             std::move(object_url)),
        promise(std::move(promise)) {}
  ~DeleteTask() { owner->admission_queue().Finish(this); }
  bool IsCancelled() { return !promise.result_needed(); }
  void Fail(absl::Status status) { promise.SetResult(std::move(status)); }
  // Result of the conditional HEAD "peek" issued by ConditionTask::Retry.
  void OnHeadResponse(const Result<HttpResponse>& response) {
    if (!response.ok()) {
      promise.SetResult(response.status());
      return;
    }
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    switch (response.value().status_code) {
      case 412:
        // Precondition failed: report an unknown generation (no delete).
        r.generation = StorageGeneration::Unknown();
        promise.SetResult(std::move(r));
        return;
      case 404:
        // Already absent: only acceptable when the condition matches
        // "no value".
        if (!options_.generation_conditions.MatchesNoValue()) {
          r.generation = StorageGeneration::Unknown();
          promise.SetResult(std::move(r));
          return;
        }
        break;
      default:
        break;
    }
    AfterHeadRequest();
  }
  // Issues the actual DELETE request.
  void AfterHeadRequest() {
    start_time_ = absl::Now();
    const auto& ehr = endpoint_region_.value();
    auto request = S3RequestBuilder("DELETE", object_url_)
                       .MaybeAddRequesterPayer(owner->spec_.requester_pays)
                       .BuildRequest(owner->host_header_, credentials_,
                                     ehr.aws_region, kEmptySha256, start_time_);
    ABSL_LOG_IF(INFO, s3_logging) << "DeleteTask: " << request;
    auto future = owner->transport_->IssueRequest(request, {});
    future.ExecuteWhenReady([self = IntrusivePtr<DeleteTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
        << "DeleteTask " << *response;
    bool is_retryable = false;
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) {
        is_retryable = DefaultIsRetryableCode(response.status().code());
        return response.status();
      }
      switch (response.value().status_code) {
        case 404:
          // Deleting a missing object is not an error.
          return absl::OkStatus();
        default:
          break;
      }
      return AwsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        // A retry has been scheduled.
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
      return;
    }
    TimestampedStorageGeneration r;
    r.time = start_time_;
    switch (response.value().status_code) {
      case 404:
        // If a concrete generation condition was specified, the object
        // vanishing concurrently is a failed precondition.
        if (!StorageGeneration::IsNoValue(
                options_.generation_conditions.if_equal) &&
            !StorageGeneration::IsUnknown(
                options_.generation_conditions.if_equal)) {
          r.generation = StorageGeneration::Unknown();
          break;
        }
        [[fallthrough]];
      default:
        r.generation = StorageGeneration::NoValue();
        break;
    }
    promise.SetResult(std::move(r));
  }
};
/// Writes `value` to `key`, or deletes `key` when `value` is nullopt.
Future<TimestampedStorageGeneration> S3KeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  s3_metrics.write.Increment();
  // Validate before issuing any network traffic.
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid S3 object name");
  }
  if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  // Single-request PUT uploads are limited to 5 GiB (kMaxS3PutSize).
  if (value && value->size() > kMaxS3PutSize) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Object size ", value->size(), " exceeds S3 limit of ", kMaxS3PutSize));
  }
  auto op = PromiseFuturePair<TimestampedStorageGeneration>::Make();
  MaybeResolveRegion().ExecuteWhenReady(
      [self = IntrusivePtr<S3KeyValueStore>(this),
       promise = std::move(op.promise), key = std::move(key),
       value = std::move(value), options = std::move(options)](
          ReadyFuture<const S3EndpointRegion> ready) {
        if (!ready.status().ok()) {
          promise.SetResult(ready.status());
          return;
        }
        std::string object_url =
            tensorstore::StrCat(ready.value().endpoint, "/", key);
        if (!value) {
          // Absent value => delete the object.
          auto state = internal::MakeIntrusivePtr<DeleteTask>(
              std::move(self), std::move(options), std::move(ready),
              std::move(object_url), std::move(promise));
          // Reference held by the rate limiter until DeleteTask::Admit.
          intrusive_ptr_increment(
              state.get());
          state->owner->write_rate_limiter().Admit(state.get(),
                                                   &DeleteTask::Start);
          return;
        }
        auto state = internal::MakeIntrusivePtr<WriteTask>(
            std::move(self), std::move(options), std::move(ready),
            std::move(object_url), *std::move(value), std::move(promise));
        // Reference held by the rate limiter until WriteTask::Admit.
        intrusive_ptr_increment(state.get());
        state->owner->write_rate_limiter().Admit(state.get(),
                                                 &WriteTask::Start);
      });
  return std::move(op.future);
}
// Asynchronous operation state for an S3 ListObjectsV2 request sequence.
//
// A ListTask is first admitted through the driver's read rate limiter and
// admission queue, then repeatedly issues paginated `GET ?list-type=2`
// requests, forwarding each returned key to `receiver_` until the requested
// range is exhausted, the listing completes, or the receiver cancels.
// Reference counting: ListImpl transfers one reference via a manual
// intrusive_ptr_increment, which is adopted in Admit.
struct ListTask : public RateLimiterNode,
                  public internal::AtomicReferenceCount<ListTask> {
  internal::IntrusivePtr<S3KeyValueStore> owner_;
  ListOptions options_;
  ListReceiver receiver_;
  // Bucket endpoint URL (with trailing "/"); set by ListImpl once resolved.
  std::string resource_;
  ReadyFuture<const S3EndpointRegion> endpoint_region_;
  // Pagination token from the previous response; empty on the first page.
  std::string continuation_token_;
  absl::Time start_time_;
  // Number of consecutive failed attempts; reset after each successful page.
  int attempt_ = 0;
  bool has_query_parameters_;
  // Set by the receiver's cancel callback; polled before each step.
  std::atomic<bool> cancelled_{false};

  ListTask(internal::IntrusivePtr<S3KeyValueStore>&& owner,
           ListOptions&& options, ListReceiver&& receiver)
      : owner_(std::move(owner)),
        options_(std::move(options)),
        receiver_(std::move(receiver)) {
    // Give the receiver a cancellation hook immediately.
    execution::set_starting(receiver_, [this] {
      cancelled_.store(true, std::memory_order_relaxed);
    });
  }

  ~ListTask() {
    execution::set_stopping(receiver_);
    owner_->admission_queue().Finish(this);
  }

  inline bool is_cancelled() {
    return cancelled_.load(std::memory_order_relaxed);
  }

  // Read-rate-limiter entry point: hands the task to the admission queue.
  static void Start(void* task) {
    auto* self = reinterpret_cast<ListTask*>(task);
    self->owner_->read_rate_limiter().Finish(self);
    self->owner_->admission_queue().Admit(self, &ListTask::Admit);
  }

  // Admission-queue entry point: adopts the reference added by ListImpl and
  // issues the first request on the executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<ListTask*>(task);
    self->owner_->executor()(
        [state = IntrusivePtr<ListTask>(self, internal::adopt_object_ref)] {
          state->IssueRequest();
        });
  }

  // Retry callback invoked by BackoffForAttemptAsync.
  void Retry() { IssueRequest(); }

  // Builds and issues one signed ListObjectsV2 request for the current
  // continuation token.
  void IssueRequest() {
    if (is_cancelled()) {
      execution::set_done(receiver_);
      return;
    }
    auto request_builder =
        S3RequestBuilder("GET", resource_).AddQueryParameter("list-type", "2");
    // Restrict the listing to the longest common prefix of the key range.
    if (auto prefix = LongestPrefix(options_.range); !prefix.empty()) {
      request_builder.AddQueryParameter("prefix", std::string(prefix));
    }
    if (!continuation_token_.empty()) {
      request_builder.AddQueryParameter("continuation-token",
                                        continuation_token_);
    }
    AwsCredentials credentials;
    if (auto maybe_credentials = owner_->GetCredentials();
        !maybe_credentials.ok()) {
      execution::set_error(receiver_, std::move(maybe_credentials).status());
      return;
    } else if (maybe_credentials.value().has_value()) {
      credentials = std::move(*maybe_credentials.value());
    }
    const auto& ehr = endpoint_region_.value();
    start_time_ = absl::Now();
    auto request =
        request_builder.BuildRequest(owner_->host_header_, credentials,
                                     ehr.aws_region, kEmptySha256, start_time_);
    ABSL_LOG_IF(INFO, s3_logging) << "List: " << request;
    auto future = owner_->transport_->IssueRequest(request, {});
    future.ExecuteWhenReady(WithExecutor(
        owner_->executor(), [self = IntrusivePtr<ListTask>(this)](
                                ReadyFuture<HttpResponse> response) {
          self->OnResponse(response.result());
        }));
  }

  // Translates the status of one response into receiver signals.
  void OnResponse(const Result<HttpResponse>& response) {
    auto status = OnResponseImpl(response);
    if (absl::IsCancelled(status)) {
      execution::set_done(receiver_);
      return;
    }
    if (!status.ok()) {
      execution::set_error(receiver_, std::move(status));
      return;
    }
  }

  // Parses one ListBucketResult page, emits matching keys, and either
  // requests the next page, retries, or terminates the stream.
  absl::Status OnResponseImpl(const Result<HttpResponse>& response) {
    if (is_cancelled()) {
      return absl::CancelledError();
    }
    ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
        << "List " << *response;
    bool is_retryable = false;
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) {
        is_retryable = DefaultIsRetryableCode(response.status().code());
        return response.status();
      }
      return AwsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      // Schedule a retry with exponential backoff; Retry() re-issues.
      return owner_->BackoffForAttemptAsync(std::move(status), attempt_++,
                                            this);
    }
    auto cord = response->payload;
    auto payload = cord.Flatten();
    tinyxml2::XMLDocument xmlDocument;
    if (int xmlcode = xmlDocument.Parse(payload.data(), payload.size());
        xmlcode != tinyxml2::XML_SUCCESS) {
      return absl::InvalidArgumentError(
          absl::StrCat("Malformed List response: ", xmlcode));
    }
    auto* root = xmlDocument.FirstChildElement("ListBucketResult");
    if (root == nullptr) {
      return absl::InvalidArgumentError(
          "Malformed List response: missing <ListBucketResult>");
    }
    for (auto* contents = root->FirstChildElement("Contents");
         contents != nullptr;
         contents = contents->NextSiblingElement("Contents")) {
      if (is_cancelled()) {
        return absl::CancelledError();
      }
      auto* key_node = contents->FirstChildElement("Key");
      if (key_node == nullptr) {
        return absl::InvalidArgumentError(
            "Malformed List response: missing <Key> in <Contents>");
      }
      std::string key = GetNodeText(key_node);
      // Keys below the range minimum can appear because only the common
      // prefix was sent to S3; skip them.
      if (key < options_.range.inclusive_min) continue;
      // Keys at/after the exclusive max terminate the listing entirely.
      if (KeyRange::CompareKeyAndExclusiveMax(
              key, options_.range.exclusive_max) >= 0) {
        execution::set_done(receiver_);
        return absl::OkStatus();
      }
      int64_t size =
          GetNodeInt(contents->FirstChildElement("Size")).value_or(-1);
      if (key.size() > options_.strip_prefix_length) {
        execution::set_value(
            receiver_,
            ListEntry{key.substr(options_.strip_prefix_length), size});
      }
    }
    // A fully processed page resets the retry counter.
    attempt_ = 0;
    if (GetNodeText(root->FirstChildElement("IsTruncated")) == "true") {
      auto* next_continuation_token =
          root->FirstChildElement("NextContinuationToken");
      if (next_continuation_token == nullptr) {
        return absl::InvalidArgumentError(
            "Malformed List response: missing <NextContinuationToken>");
      }
      continuation_token_ = GetNodeText(next_continuation_token);
      IssueRequest();
    } else {
      execution::set_done(receiver_);
    }
    return absl::OkStatus();
  }
};
// Lists keys in `options.range`, streaming results to `receiver`.
void S3KeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
  s3_metrics.list.Increment();
  // An empty range trivially produces no entries; complete immediately.
  if (options.range.empty()) {
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  auto task = internal::MakeIntrusivePtr<ListTask>(
      IntrusivePtr<S3KeyValueStore>(this), std::move(options),
      std::move(receiver));
  // The bucket endpoint must be resolved before any request can be built.
  MaybeResolveRegion().ExecuteWhenReady(
      [task = std::move(task)](ReadyFuture<const S3EndpointRegion> ready) {
        if (ready.status().ok()) {
          task->resource_ = tensorstore::StrCat(ready.value().endpoint, "/");
          task->endpoint_region_ = std::move(ready);
          // Hand one reference to the rate limiter; it is adopted with
          // adopt_object_ref in ListTask::Admit.
          intrusive_ptr_increment(task.get());
          task->owner_->read_rate_limiter().Admit(task.get(),
                                                  &ListTask::Start);
        } else {
          execution::set_error(task->receiver_, ready.status());
        }
      });
}
// Receiver passed to ListImpl by DeleteRange: issues a Delete for every
// listed key and funnels the results into `promise_`.
//
// NOTE: member order matters -- DeleteRange constructs this receiver with
// aggregate initialization.
struct DeleteRangeListReceiver {
  IntrusivePtr<S3KeyValueStore> owner_;
  Promise<void> promise_;
  FutureCallbackRegistration cancel_registration_;

  void set_starting(AnyCancelReceiver cancel) {
    // Propagate cancellation from the promise to the list operation.
    cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
  }

  void set_value(ListEntry entry) {
    assert(!entry.key.empty());
    if (!entry.key.empty()) {
      // Any failed Delete is reflected in the aggregate promise result.
      LinkError(promise_, owner_->Delete(std::move(entry.key)));
    }
  }

  void set_error(absl::Status error) {
    SetDeferredResult(promise_, std::move(error));
    // Release the promise so the future can become ready.
    promise_ = Promise<void>();
  }

  void set_done() { promise_ = Promise<void>(); }

  void set_stopping() { cancel_registration_.Unregister(); }
};
// Deletes all keys in `range` by listing the range and deleting each key
// as it is produced.
Future<const void> S3KeyValueStore::DeleteRange(KeyRange range) {
  s3_metrics.delete_range.Increment();
  // Nothing to delete.
  if (range.empty()) return absl::OkStatus();
  auto pair = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
  ListOptions list_options;
  list_options.range = std::move(range);
  // DeleteRangeListReceiver links each per-key Delete future into the
  // promise; the future resolves once all deletes complete.
  ListImpl(list_options,
           DeleteRangeListReceiver{
               internal::IntrusivePtr<S3KeyValueStore>(this),
               std::move(pair.promise)});
  return std::move(pair.future);
}
// Returns the (cached) future resolving the bucket's endpoint and AWS
// region.  The first call starts resolution; subsequent calls return the
// same shared future.  `mutex_` guards `resolve_ehr_`.
Future<const S3EndpointRegion> S3KeyValueStore::MaybeResolveRegion() {
  absl::MutexLock l(&mutex_);
  if (!resolve_ehr_.null()) return resolve_ehr_;
  // An explicitly configured, non-empty endpoint is passed through;
  // otherwise an empty string_view lets the resolver derive the endpoint
  // from the bucket name.
  resolve_ehr_ = internal_kvstore_s3::ResolveEndpointRegion(
      spec_.bucket,
      !spec_.endpoint.has_value() || spec_.endpoint.value().empty()
          ? std::string_view{}
          : std::string_view(spec_.endpoint.value()),
      spec_.host_header.value_or(std::string{}), transport_);
  // Log the outcome for debugging; errors still propagate via the future.
  resolve_ehr_.ExecuteWhenReady([](ReadyFuture<const S3EndpointRegion> ready) {
    if (!ready.status().ok()) {
      ABSL_LOG_IF(INFO, s3_logging)
          << "S3 driver failed to resolve endpoint: " << ready.status();
    } else {
      ABSL_LOG_IF(INFO, s3_logging)
          << "S3 driver using endpoint [" << ready.value() << "]";
    }
  });
  return resolve_ehr_;
}
// Opens the driver: validates the configured endpoint and, when the
// endpoint/region is fully determined by the spec, pre-populates the
// cached endpoint-resolution future.
Future<kvstore::DriverPtr> S3KeyValueStoreSpec::DoOpen() const {
  auto driver = internal::MakeIntrusivePtr<S3KeyValueStore>(
      internal_http::GetDefaultHttpTransport(), data_);
  if (data_.rate_limiter.has_value()) {
    ABSL_LOG_IF(INFO, s3_logging) << "Using experimental_s3_rate_limiter";
  }
  auto validated = internal_kvstore_s3::ValidateEndpoint(
      data_.bucket, data_.aws_region, data_.endpoint.value_or(std::string{}),
      driver->host_header_);
  if (absl::Status* status = std::get_if<absl::Status>(&validated)) {
    // Validation failure aborts the open.
    if (!status->ok()) return std::move(*status);
  } else if (S3EndpointRegion* ehr =
                 std::get_if<S3EndpointRegion>(&validated)) {
    // Endpoint fully known up front; skip later HEAD-based resolution.
    ABSL_LOG_IF(INFO, s3_logging)
        << "S3 driver using endpoint [" << *ehr << "]";
    driver->resolve_ehr_ = MakeReadyFuture<S3EndpointRegion>(std::move(*ehr));
  }
  return driver;
}
// Parses an "s3://bucket/path" URL into a kvstore::Spec with default
// context resources.  Query strings and fragments are rejected.
Result<kvstore::Spec> ParseS3Url(std::string_view url) {
  auto parsed = internal::ParseGenericUri(url);
  assert(parsed.scheme == kUriScheme);
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  if (!IsValidBucketName(parsed.authority)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid S3 bucket name: ", QuoteString(parsed.authority)));
  }
  // Drop the leading "/" and percent-decode the remainder as the key path.
  std::string path;
  if (!parsed.path.empty()) {
    path = internal::PercentDecode(parsed.path.substr(1));
  }
  auto spec = internal::MakeIntrusivePtr<S3KeyValueStoreSpec>();
  auto& data = spec->data_;
  data.bucket = std::string(parsed.authority);
  data.requester_pays = false;
  // Bind default context resources so the spec is usable immediately.
  data.aws_credentials =
      Context::Resource<AwsCredentialsResource>::DefaultSpec();
  data.request_concurrency =
      Context::Resource<S3ConcurrencyResource>::DefaultSpec();
  data.retries = Context::Resource<S3RequestRetries>::DefaultSpec();
  data.data_copy_concurrency =
      Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
  return {std::in_place, std::move(spec), std::move(path)};
}
// Static registration: constructing these objects at static-initialization
// time registers the "s3" driver and its URL scheme with the global
// kvstore registries.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::S3KeyValueStoreSpec>
    registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
    url_scheme_registration{kUriScheme, tensorstore::ParseS3Url};
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::S3KeyValueStore) | #include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StatusIs;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
namespace {
// Returns a Context configured with a small retry budget and millisecond
// delays so that retry-exhaustion tests complete quickly.
Context DefaultTestContext() {
  auto spec = Context::Spec::FromJson({
      {"s3_request_retries",
       {{"max_retries", 2}, {"initial_delay", "1ms"}, {"max_delay", "2ms"}}},
  });
  return Context{spec.value()};
}
// Invalid bucket names must fail to open; conforming names (including a
// long dotted name) must succeed.
// NOTE(review): the endpoint URLs in this file appear truncated at
// "https:" -- presumably mangled by the tooling that produced this copy;
// confirm against the original source.
TEST(S3KeyValueStoreTest, BadBucketNames) {
  auto context = DefaultTestContext();
  // Names violating S3 bucket naming rules.
  for (auto bucket : {"a", "_abc", "abc_", "a..b", "a.-.b"}) {
    EXPECT_FALSE(kvstore::Open({{"driver", "s3"},
                                {"bucket", bucket},
                                {"endpoint", "https:
                               context)
                     .result())
        << "bucket: " << bucket;
  }
  // Conforming names.
  for (auto bucket :
       {"abc", "abc.1-2-3.abc",
        "a."
        "0123456789123456789012345678912345678901234567891234567890"
        "1234567891234567890123456789123456789012345678912345678901"
        "23456789123456789.B"}) {
    EXPECT_TRUE(kvstore::Open({{"driver", "s3"},
                               {"bucket", bucket},
                               {"endpoint", "https:
                               {"aws_region", "us-east-1"}},
                              context)
                    .result())
        << "bucket: " << bucket;
  }
}

// The spec must round-trip through JSON without contacting the service.
TEST(S3KeyValueStoreTest, SpecRoundtrip) {
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  options.check_write_read = false;
  options.check_data_persists = false;
  options.check_data_after_serialization = false;
  options.full_spec = {{"driver", "s3"}, {"bucket", "mybucket"}};
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}

// Malformed specs (extra members, missing or invalid bucket) are rejected.
TEST(S3KeyValueStoreTest, InvalidSpec) {
  auto context = DefaultTestContext();
  EXPECT_THAT(kvstore::Open(
                  {{"driver", "s3"}, {"bucket", "my-bucket"}, {"extra", "key"}},
                  context)
                  .result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(kvstore::Open({{"driver", "s3"}}, context).result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(
      kvstore::Open({{"driver", "s3"}, {"bucket", "a"}}, context).result(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// RAII guard that installs `transport` as the process-wide default
// HttpTransport for the duration of a test and restores the default
// (nullptr) on destruction.
//
// The constructor is explicit and the guard is non-copyable: copying
// would cause the global transport to be reset twice.
struct DefaultHttpTransportSetter {
  explicit DefaultHttpTransportSetter(
      std::shared_ptr<HttpTransport> transport) {
    SetDefaultHttpTransport(transport);
  }
  ~DefaultHttpTransportSetter() { SetDefaultHttpTransport(nullptr); }
  DefaultHttpTransportSetter(const DefaultHttpTransportSetter&) = delete;
  DefaultHttpTransportSetter& operator=(const DefaultHttpTransportSetter&) =
      delete;
};
// Read/write/delete against a mocked transport using virtual-host style
// addressing; also verifies the "host" header sent with each request.
// NOTE(review): mock URLs in this file appear truncated at "https:" --
// confirm against the original source.
TEST(S3KeyValueStoreTest, SimpleMock_VirtualHost) {
  // Canned responses keyed by "METHOD url".
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"GET https:
       HttpResponse{200,
                    absl::Cord("abcd"),
                    {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
      {"PUT https:
       HttpResponse{
           200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open(
          {{"driver", "s3"}, {"bucket", "my-bucket"}, {"path", "tmp:1/"}},
          context)
          .result());
  auto read_result = kvstore::Read(store, "key_read").result();
  EXPECT_THAT(read_result,
              MatchesKvsReadResult(absl::Cord("abcd"),
                                   StorageGeneration::FromString(
                                       "900150983cd24fb0d6963f7d28e17f72")));
  EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
              MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
                  "900150983cd24fb0d6963f7d28e17f72")));
  TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
  // Every bucket-addressed request must carry the virtual-host header.
  int host_header_validated = 0;
  for (const auto& request : mock_transport->requests()) {
    if (absl::StartsWith(request.url,
                         "https:
      host_header_validated++;
      EXPECT_THAT(
          request.headers,
          testing::Contains("host: my-bucket.s3.us-east-1.amazonaws.com"));
    }
  }
  EXPECT_THAT(host_header_validated, testing::Ge(2));
}

// As above, but with a dotted bucket name, which forces path-style
// addressing (regional host header instead of a bucket subdomain).
TEST(S3KeyValueStoreTest, SimpleMock_NoVirtualHost) {
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"GET https:
       HttpResponse{200,
                    absl::Cord("abcd"),
                    {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
      {"PUT https:
       HttpResponse{
           200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
                                   kvstore::Open({{"driver", "s3"},
                                                  {"bucket", "my.bucket"},
                                                  {"aws_region", "us-east-1"}},
                                                 context)
                                       .result());
  auto read_result = kvstore::Read(store, "key_read").result();
  EXPECT_THAT(read_result,
              MatchesKvsReadResult(absl::Cord("abcd"),
                                   StorageGeneration::FromString(
                                       "900150983cd24fb0d6963f7d28e17f72")));
  EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
              MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
                  "900150983cd24fb0d6963f7d28e17f72")));
  TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
  int host_header_validated = 0;
  for (const auto& request : mock_transport->requests()) {
    if (absl::StartsWith(request.url, "https:
      host_header_validated++;
      EXPECT_THAT(request.headers,
                  testing::Contains("host: s3.us-east-1.amazonaws.com"));
    }
  }
  EXPECT_THAT(host_header_validated, testing::Ge(2));
}

// As above, but with an explicitly configured endpoint and anonymous
// credentials; the host header must match the configured endpoint.
TEST(S3KeyValueStoreTest, SimpleMock_Endpoint) {
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"GET https:
       HttpResponse{200,
                    absl::Cord("abcd"),
                    {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
      {"PUT https:
       HttpResponse{
           200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, kvstore::Open({{"driver", "s3"},
                                 {"bucket", "my-bucket"},
                                 {"endpoint", "https:
                                 {"aws_credentials", {{"anonymous", true}}},
                                 {"path", "tmp:1/"}},
                                context)
                      .result());
  auto read_result = kvstore::Read(store, "key_read").result();
  EXPECT_THAT(read_result,
              MatchesKvsReadResult(absl::Cord("abcd"),
                                   StorageGeneration::FromString(
                                       "900150983cd24fb0d6963f7d28e17f72")));
  EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
              MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
                  "900150983cd24fb0d6963f7d28e17f72")));
  TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
  int host_header_validated = 0;
  for (const auto& request : mock_transport->requests()) {
    if (absl::StartsWith(request.url, "https:
      host_header_validated++;
      EXPECT_THAT(request.headers, testing::Contains("host: localhost:1234"));
    }
  }
  EXPECT_THAT(host_header_validated, testing::Ge(2));
}
// Listing with pagination: the first page is truncated with a
// continuation token; the second completes the listing.  Quotes in the
// canned XML appear as &quot; entities, matching what the server returns
// inside <ETag> elements.
TEST(S3KeyValueStoreTest, SimpleMock_List) {
  // Page 1: keys a, b, b/a; IsTruncated=true with token "CONTINUE".
  const auto kListResultA =
      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
      "<ListBucketResult xmlns=\"http:
      "<Name>bucket</Name>"
      "<Prefix></Prefix>"
      "<KeyCount>3</KeyCount>"
      "<MaxKeys>1000</MaxKeys>"
      "<IsTruncated>true</IsTruncated>"
      "<NextContinuationToken>CONTINUE</NextContinuationToken>"
      "<Contents><Key>a</Key>"
      "<LastModified>2023-09-06T17:53:27.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>b</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>b/a</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "</ListBucketResult>";
  // Page 2: keys b/b, c; IsTruncated=false terminates the listing.
  const auto kListResultB =
      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
      "<ListBucketResult xmlns=\"http:
      "<Name>bucket</Name>"
      "<Prefix></Prefix>"
      "<KeyCount>2</KeyCount>"
      "<MaxKeys>1000</MaxKeys>"
      "<IsTruncated>false</IsTruncated>"
      "<Contents><Key>b/b</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>c</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "</ListBucketResult>";
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"GET https:
       HttpResponse{200, absl::Cord(kListResultA), {}}},
      {"GET "
       "https:
       "?continuation-token=CONTINUE&list-type=2",
       HttpResponse{200, absl::Cord(kListResultB), {}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", "s3"}, {"bucket", "my-bucket"}}, context)
          .result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto list_result,
                                   kvstore::ListFuture(store, {}).result());
  // All five keys from both pages, in order.
  EXPECT_THAT(list_result, ::testing::ElementsAre(
                               MatchesListEntry("a"), MatchesListEntry("b"),
                               MatchesListEntry("b/a"), MatchesListEntry("b/b"),
                               MatchesListEntry("c")));
}

// Listing restricted to KeyRange::Prefix("b"): S3 may return keys outside
// the requested range (here "c"); the driver must filter them out.
TEST(S3KeyValueStoreTest, SimpleMock_ListPrefix) {
  const auto kListResult =
      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
      "<ListBucketResult xmlns=\"http:
      "<Name>bucket</Name>"
      "<Prefix>b</Prefix>"
      "<KeyCount>4</KeyCount>"
      "<MaxKeys>1000</MaxKeys>"
      "<IsTruncated>false</IsTruncated>"
      "<Contents><Key>b</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>b/a</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>b/b</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "<Contents><Key>c</Key>"
      "<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
      "<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>"
      "<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
      "</ListBucketResult>";
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"GET "
       "https:
       "?list-type=2&prefix=b",
       HttpResponse{200,
                    absl::Cord(kListResult),
                    {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", "s3"}, {"bucket", "my-bucket"}}, context)
          .result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto list_result,
      kvstore::ListFuture(store, {::tensorstore::KeyRange::Prefix("b")})
          .result());
  EXPECT_THAT(list_result, ::testing::ElementsAre(MatchesListEntry("b"),
                                                  MatchesListEntry("b/a"),
                                                  MatchesListEntry("b/b")));
}
// A persistently throttled read must be retried until the retry budget
// (max_retries=2 from DefaultTestContext) is exhausted, then surface
// kAborted.
TEST(S3KeyValueStoreTest, SimpleMock_RetryTimesOut) {
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      // Always respond with a retryable ThrottledException error body.
      {"GET https:
       HttpResponse{400,
                    absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
  <Code>ThrottledException</Code>
  <Message>Endless retry</Message>
  <Resource>/my-bucket/tmp:1/key_read</Resource>
  <RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)"),
                    {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, kvstore::Open({{"driver", "s3"},
                                 {"bucket", "my-bucket"},
                                 {"endpoint", "https:
                                 {"path", "tmp:1/"}},
                                context)
                      .result());
  auto read_result = kvstore::Read(store, "key_read").result();
  EXPECT_THAT(read_result, StatusIs(absl::StatusCode::kAborted));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b2e3dc4c-9f53-49f1-8f40-d936b4c2f102 | cpp | google/tensorstore | s3_metadata | tensorstore/kvstore/s3/s3_metadata.cc | tensorstore/kvstore/s3/s3_metadata_test.cc | #include "tensorstore/kvstore/s3/s3_metadata.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <initializer_list>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tinyxml2.h"
using ::tensorstore::internal_http::HttpResponse;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kEtag[] = "etag";
static constexpr char kLt[] = "<";
static constexpr char kGt[] = ">";
static constexpr char kQuot[] = """;
static constexpr char kApos[] = "'";
static constexpr char kAmp[] = "&";
std::string UnescapeXml(std::string_view data) {
static LazyRE2 kSpecialXmlSymbols = {"(>|<|"|'|&)"};
std::string_view search = data;
std::string_view symbol;
size_t result_len = data.length();
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
result_len -= symbol.length() - 1;
}
if (result_len == data.length()) {
return std::string(data);
}
search = data;
size_t pos = 0;
size_t res_pos = 0;
auto result = std::string(result_len, '0');
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
size_t next = data.length() - search.length();
for (size_t i = pos; i < next - symbol.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
if (symbol == kGt) {
result[res_pos++] = '>';
} else if (symbol == kLt) {
result[res_pos++] = '<';
} else if (symbol == kQuot) {
result[res_pos++] = '"';
} else if (symbol == kApos) {
result[res_pos++] = '`';
} else if (symbol == kAmp) {
result[res_pos++] = '&';
} else {
assert(false);
}
pos = next;
}
for (size_t i = pos; i < data.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
return result;
}
// Returns whether an HTTP status code indicates a transient server-side
// or throttling condition for which the request may be retried.
bool IsRetryableAwsStatusCode(int32_t status_code) {
  static constexpr int32_t kRetryableStatusCodes[] = {
      408,  // Request Timeout
      419,  // Authentication Timeout
      429,  // Too Many Requests
      440,  // Login Timeout
      500,  // Internal Server Error
      502,  // Bad Gateway
      503,  // Service Unavailable
      504,  // Gateway Timeout
      509,  // Bandwidth Limit Exceeded
      598,  // Network read timeout
      599,  // Network connect timeout
  };
  for (int32_t retryable : kRetryableStatusCodes) {
    if (retryable == status_code) return true;
  }
  return false;
}
// Returns whether an AWS error code string names a transient condition
// (throttling, internal failure, timeout, clock skew) for which the
// request may be retried.  Comparison is exact and case-sensitive.
//
// Uses a static constexpr table with a linear scan instead of a
// heap-allocated hash set: the set is small and fixed, this is only
// reached on the error path, and it avoids dynamic initialization.
bool IsRetryableAwsMessageCode(std::string_view code) {
  static constexpr std::string_view kRetryableMessages[] = {
      "InternalFailureException",
      "InternalFailure",
      "InternalServerError",
      "InternalError",
      "RequestExpiredException",
      "RequestExpired",
      "ServiceUnavailableException",
      "ServiceUnavailableError",
      "ServiceUnavailable",
      "RequestThrottledException",
      "RequestThrottled",
      "ThrottlingException",
      "ThrottledException",
      "Throttling",
      "SlowDownException",
      "SlowDown",
      "RequestTimeTooSkewedException",
      "RequestTimeTooSkewed",
      "RequestTimeoutException",
      "RequestTimeout",
  };
  for (std::string_view retryable : kRetryableMessages) {
    if (retryable == code) return true;
  }
  return false;
}
}
/// Concatenates the text of `node`'s children and parses it as a base-10
/// int64.  Returns std::nullopt if `node` is null or the text is not a
/// valid integer.
std::optional<int64_t> GetNodeInt(tinyxml2::XMLNode* node) {
  if (node == nullptr) return std::nullopt;
  tinyxml2::XMLPrinter printer;
  for (auto* child = node->FirstChild(); child != nullptr;
       child = child->NextSibling()) {
    child->Accept(&printer);
  }
  if (int64_t value; absl::SimpleAtoi(printer.CStr(), &value)) {
    return value;
  }
  return std::nullopt;
}
/// Concatenates the text of `node`'s children and parses it as an RFC 3339
/// timestamp in UTC.  Returns std::nullopt if `node` is null or parsing
/// fails.
std::optional<absl::Time> GetNodeTimestamp(tinyxml2::XMLNode* node) {
  if (node == nullptr) return std::nullopt;
  tinyxml2::XMLPrinter printer;
  for (auto* child = node->FirstChild(); child != nullptr;
       child = child->NextSibling()) {
    child->Accept(&printer);
  }
  absl::Time timestamp;
  if (!absl::ParseTime(absl::RFC3339_full, printer.CStr(), absl::UTCTimeZone(),
                       &timestamp, nullptr)) {
    return std::nullopt;
  }
  return timestamp;
}
/// Concatenates the text of `node`'s children, un-escaping XML character
/// entities.  Returns an empty string for a null `node`.
std::string GetNodeText(tinyxml2::XMLNode* node) {
  if (node == nullptr) return {};
  tinyxml2::XMLPrinter printer;
  for (auto* child = node->FirstChild(); child != nullptr;
       child = child->NextSibling()) {
    child->Accept(&printer);
  }
  return UnescapeXml(printer.CStr());
}
/// Derives a StorageGeneration from the "etag" response header.
/// Returns NotFoundError if no etag header is present.
Result<StorageGeneration> StorageGenerationFromHeaders(
    const absl::btree_multimap<std::string, std::string>& headers) {
  auto it = headers.find(kEtag);
  if (it == headers.end()) {
    return absl::NotFoundError("etag not found in response headers");
  }
  return StorageGeneration::FromString(it->second);
}
/// Converts an AWS HTTP `response` into an absl::Status.
///
/// On failure, extracts the AWS error code and request id from the
/// `x-amzn-errortype` / `x-amzn-requestid` headers, falling back to the
/// XML <Error> payload, and sets `retryable` from the error code (when
/// present) or from the HTTP status code.  Diagnostic details are
/// attached as status payloads.
absl::Status AwsHttpResponseToStatus(const HttpResponse& response,
                                     bool& retryable, SourceLocation loc) {
  auto absl_status_code = internal_http::HttpResponseCodeToStatusCode(response);
  if (absl_status_code == absl::StatusCode::kOk) {
    return absl::OkStatus();
  }
  // Prefer the error type / request id provided in the headers.
  std::string error_type;
  if (auto error_header = response.headers.find("x-amzn-errortype");
      error_header != response.headers.end()) {
    error_type = error_header->second;
  }
  absl::Cord request_id;
  if (auto request_id_header = response.headers.find("x-amzn-requestid");
      request_id_header != response.headers.end()) {
    request_id = request_id_header->second;
  }
  std::string message;
  auto payload = response.payload;
  auto payload_str = payload.Flatten();
  // Fill any missing fields from the XML <Error> document in the payload;
  // parse failures and missing elements are silently ignored.
  [&]() {
    if (payload.empty()) return;
    tinyxml2::XMLDocument xmlDocument;
    if (int xmlcode = xmlDocument.Parse(payload_str.data(), payload_str.size());
        xmlcode != tinyxml2::XML_SUCCESS) {
      return;
    }
    auto* root_node = xmlDocument.FirstChildElement("Error");
    if (root_node == nullptr) return;
    if (error_type.empty()) {
      error_type = GetNodeText(root_node->FirstChildElement("Code"));
    }
    if (request_id.empty()) {
      request_id = GetNodeText(root_node->FirstChildElement("RequestId"));
    }
    message = GetNodeText(root_node->FirstChildElement("Message"));
  }();
  // An explicit AWS error code determines retryability; otherwise fall
  // back to the HTTP status code.
  retryable = error_type.empty()
                  ? IsRetryableAwsStatusCode(response.status_code)
                  : IsRetryableAwsMessageCode(error_type);
  if (error_type.empty()) {
    error_type = "Unknown";
  }
  absl::Status status(absl_status_code,
                      absl::StrFormat("%s%s%s", error_type,
                                      message.empty() ? "" : ": ", message));
  status.SetPayload("http_response_code",
                    absl::Cord(absl::StrFormat("%d", response.status_code)));
  if (!payload_str.empty()) {
    // Attach at most the first 256 bytes of the response body.
    status.SetPayload(
        "http_response_body",
        payload.Subcord(0,
                        payload_str.size() < 256 ? payload_str.size() : 256));
  }
  if (!request_id.empty()) {
    status.SetPayload("x-amzn-requestid", request_id);
  }
  MaybeAddSourceLocation(status, loc);
  return status;
}
}
} | #include "tensorstore/kvstore/s3/s3_metadata.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/util/status_testutil.h"
#include "tinyxml2.h"
namespace {
using ::tensorstore::StatusIs;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::AwsHttpResponseToStatus;
using ::tensorstore::internal_kvstore_s3::GetNodeInt;
using ::tensorstore::internal_kvstore_s3::GetNodeText;
using ::tensorstore::internal_kvstore_s3::GetNodeTimestamp;
// Canned ListObjectsV2 response exercising the XML node helpers.  ETag
// values use &quot; entities, as returned by S3.
// NOTE(review): the xmlns URL appears truncated at "http:" -- presumably
// mangled by the tooling that produced this copy; confirm against the
// original source.
static constexpr char kListXml[] =
    R"(<ListBucketResult xmlns="http:
    R"(<Name>i-dont-exist</Name>)"
    R"(<Prefix>tensorstore/test/</Prefix>)"
    R"(<KeyCount>3</KeyCount>)"
    R"(<MaxKeys>1000</MaxKeys>)"
    R"(<IsTruncated>false</IsTruncated>)"
    R"(<Contents>)"
    R"(<Key>tensorstore/test/abc</Key>)"
    R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
    R"(<ETag>&quot;900150983cd24fb0d6963f7d28e17f72&quot;</ETag>)"
    R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
    R"(<Size>3</Size>)"
    R"(<StorageClass>STANDARD</StorageClass>)"
    R"(</Contents>)"
    R"(<Contents>)"
    R"(<Key>tensorstore/test/ab&gt;cd</Key>)"
    R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
    R"(<ETag>&quot;e2fc714c4727ee9395f324cd2e7f331f&quot;</ETag>)"
    R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
    R"(<Size>4</Size>)"
    R"(<StorageClass>STANDARD</StorageClass>)"
    R"(</Contents>)"
    R"(<Contents>)"
    R"(<Key>tensorstore/test/abcde</Key>)"
    R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
    R"(<ETag>&quot;ab56b4d92b40713acc5af89985d4b786&quot;</ETag>)"
    R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
    R"(<Size>5</Size>)"
    R"(<StorageClass>STANDARD</StorageClass>)"
    R"(</Contents>)"
    R"(</ListBucketResult>)";

// GetNodeText / GetNodeInt / GetNodeTimestamp extract the expected values
// from the canned listing.
TEST(XmlSearchTest, GetNodeValues) {
  tinyxml2::XMLDocument xmlDocument;
  ASSERT_EQ(xmlDocument.Parse(kListXml), tinyxml2::XML_SUCCESS);
  auto* root = xmlDocument.FirstChildElement("ListBucketResult");
  ASSERT_NE(root, nullptr);
  EXPECT_EQ("i-dont-exist", GetNodeText(root->FirstChildElement("Name")));
  auto* contents = root->FirstChildElement("Contents");
  ASSERT_NE(contents, nullptr);
  // The &quot; entities are unescaped to literal quotes.
  EXPECT_EQ(R"("900150983cd24fb0d6963f7d28e17f72")",
            GetNodeText(contents->FirstChildElement("ETag")));
  EXPECT_THAT(GetNodeInt(contents->FirstChildElement("Size")),
              ::testing::Optional(::testing::Eq(3)));
  EXPECT_THAT(
      GetNodeTimestamp(contents->FirstChildElement("LastModified")),
      ::testing::Optional(::testing::Eq(absl::FromUnixSeconds(1688830015))));
}
// AwsHttpResponseToStatus: status-code mapping and retryability from the
// bare HTTP code, from the XML <Error> payload, and from the
// x-amzn-errortype header.
TEST(S3MetadataTest, AwsHttpResponseToStatus) {
  HttpResponse response;
  {
    // 404 -> kNotFound, not retryable.
    response.status_code = 404;
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kNotFound));
    EXPECT_FALSE(retryable);
  }
  {
    // 429 -> kUnavailable, retryable by HTTP code alone.
    response.status_code = 429;
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kUnavailable));
    EXPECT_TRUE(retryable);
  }
  {
    // 400 with a non-retryable error code in the XML payload.
    response.status_code = 400;
    response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
  <Code>UnknownError</Code>
  <Message>Unknown message</Message>
  <Resource>/mybucket/myfoto.jpg</Resource>
  <RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kInvalidArgument));
    EXPECT_FALSE(retryable);
  }
  {
    // 400 with a retryable error code in the XML payload.
    response.status_code = 400;
    response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
  <Code>ThrottledException</Code>
  <Message>Throttled message</Message>
  <Resource>/mybucket/myfoto.jpg</Resource>
  <RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kInvalidArgument));
    EXPECT_TRUE(retryable);
  }
  {
    // 400 with a non-retryable x-amzn-errortype header.
    response.status_code = 400;
    response.headers.emplace("x-amzn-errortype", "UnknownError");
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kInvalidArgument));
    EXPECT_FALSE(retryable);
  }
  {
    // 400 with a retryable x-amzn-errortype header.
    response.status_code = 400;
    response.headers.clear();
    response.headers.emplace("x-amzn-errortype", "ThrottledException");
    bool retryable = false;
    EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
                StatusIs(absl::StatusCode::kInvalidArgument));
    EXPECT_TRUE(retryable);
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_metadata.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_metadata_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f63c0329-0154-4e2c-bcda-f6fe9aac6a03 | cpp | google/tensorstore | s3_endpoint | tensorstore/kvstore/s3/s3_endpoint.cc | tensorstore/kvstore/s3/s3_endpoint_test.cc | #include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/s3/validate.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kAmzBucketRegionHeader[] = "x-amz-bucket-region";
// Builds a virtual-hosted-style endpoint (bucket name as part of the
// host), used for buckets whose names contain no '.'.
struct S3VirtualHostFormatter {
  std::string GetEndpoint(std::string_view bucket,
                          std::string_view aws_region) const {
    // NOTE(review): the format string below is truncated after "https:"
    // (URL-scrubbing damage) — restore the full
    // "https://<bucket>.s3.<region>.amazonaws.com"-style literal; TODO
    // confirm against the upstream file.
    return absl::StrFormat("https:
                           aws_region);
  }
};
// Builds a path-style endpoint (bucket name in the path), required when
// the bucket name contains '.' (TLS wildcard certs don't cover dots).
struct S3PathFormatter {
  std::string GetEndpoint(std::string_view bucket,
                          std::string_view aws_region) const {
    // NOTE(review): format string truncated after "https:" — restore the
    // "https://s3.<region>.amazonaws.com/<bucket>"-style literal; TODO
    // confirm against the upstream file.
    return absl::StrFormat("https:
                           bucket);
  }
};
// Builds endpoints for a user-supplied (custom) S3 endpoint. The AWS
// region does not participate in the URL — the endpoint already
// determines it.
struct S3CustomFormatter {
  std::string endpoint;
  std::string GetEndpoint(std::string_view bucket,
                          std::string_view aws_region) const {
    // Equivalent to StrFormat("%s/%s", endpoint, bucket).
    return tensorstore::StrCat(endpoint, "/", bucket);
  }
};
// Continuation linked to the HEAD-request future: derives the bucket's
// S3EndpointRegion from the response headers, falling back to
// `default_aws_region`, and finally to an error.
template <typename Formatter>
struct ResolveHost {
  std::string bucket;
  // Region used when the response carries no x-amz-bucket-region header;
  // empty means "no fallback".
  std::string default_aws_region;
  Formatter formatter;
  void operator()(Promise<S3EndpointRegion> promise,
                  ReadyFuture<HttpResponse> ready) {
    if (!promise.result_needed()) return;
    // NOTE(review): ready.value() assumes the HTTP future holds a value;
    // confirm that transport errors are handled before this callback runs.
    auto& headers = ready.value().headers;
    if (auto it = headers.find(kAmzBucketRegionHeader); it != headers.end()) {
      promise.SetResult(S3EndpointRegion{
          formatter.GetEndpoint(bucket, it->second),
          it->second,
      });
    }
    // No early returns above: the code appears to rely on later
    // SetResult calls being no-ops once a result is set — TODO confirm
    // Promise::SetResult first-write-wins semantics.
    if (!default_aws_region.empty()) {
      promise.SetResult(S3EndpointRegion{
          formatter.GetEndpoint(bucket, default_aws_region),
          default_aws_region,
      });
    }
    promise.SetResult(absl::FailedPreconditionError(tensorstore::StrCat(
        "Failed to resolve aws_region for bucket ", QuoteString(bucket))));
  }
};
}
// Validates the (bucket, aws_region, endpoint, host_header) combination.
// Returns:
//   - S3EndpointRegion when the endpoint/region can be fully resolved
//     without network I/O,
//   - absl::OkStatus() when resolution must be deferred to
//     ResolveEndpointRegion (region unknown),
//   - an error status for invalid combinations.
std::variant<absl::Status, S3EndpointRegion> ValidateEndpoint(
    std::string_view bucket, std::string aws_region, std::string_view endpoint,
    std::string host_header) {
  ABSL_CHECK(!bucket.empty());
  // host_header only makes sense together with an explicit endpoint.
  if (!host_header.empty() && endpoint.empty()) {
    return absl::InvalidArgumentError(
        "\"host_header\" cannot be set without also setting \"endpoint\"");
  }
  // Legacy (pre-2018) bucket names only exist in us-east-1; pin the
  // region and reject contradictory requests.
  if (internal_kvstore_s3::ClassifyBucketName(bucket) ==
      internal_kvstore_s3::BucketNameType::kOldUSEast1) {
    if (!aws_region.empty() && aws_region != "us-east-1") {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Bucket ", QuoteString(bucket),
          " requires aws_region \"us-east-1\", not ", QuoteString(aws_region)));
    }
    aws_region = "us-east-1";
  }
  if (endpoint.empty()) {
    if (!aws_region.empty()) {
      // Buckets containing '.' must use path-style addressing; others use
      // virtual-hosted style.
      if (!absl::StrContains(bucket, ".")) {
        S3VirtualHostFormatter formatter;
        return S3EndpointRegion{
            formatter.GetEndpoint(bucket, aws_region),
            aws_region,
        };
      }
      S3PathFormatter formatter;
      return S3EndpointRegion{
          formatter.GetEndpoint(bucket, aws_region),
          aws_region,
      };
    }
    // Region unknown: caller must resolve it via a HEAD request.
    return absl::OkStatus();
  }
  // Explicit endpoint: must be a plain http(s) URL with no query/fragment.
  auto parsed = internal::ParseGenericUri(endpoint);
  if (parsed.scheme != "http" && parsed.scheme != "https") {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Endpoint ", endpoint, " has invalid scheme ",
                            parsed.scheme, ". Should be http(s)."));
  }
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Query in endpoint unsupported ", endpoint));
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Fragment in endpoint unsupported ", endpoint));
  }
  if (!aws_region.empty()) {
    S3CustomFormatter formatter{std::string(endpoint)};
    return S3EndpointRegion{
        formatter.GetEndpoint(bucket, aws_region),
        aws_region,
    };
  }
  // Endpoint known but region not: defer to ResolveEndpointRegion.
  return absl::OkStatus();
}
// Issues a HEAD request against the bucket (or the custom endpoint) and
// asynchronously resolves the S3EndpointRegion from the
// x-amz-bucket-region response header via ResolveHost.
// NOTE(review): several URL literals in this function are truncated after
// "https:" (URL-scrubbing damage) — restore the full amazonaws.com format
// strings before compiling; TODO confirm against the upstream file.
Future<S3EndpointRegion> ResolveEndpointRegion(
    std::string bucket, std::string_view endpoint, std::string host_header,
    std::shared_ptr<internal_http::HttpTransport> transport) {
  assert(!bucket.empty());
  assert(transport);
  assert(IsValidBucketName(bucket));
  if (endpoint.empty()) {
    // No custom endpoint: probe AWS directly. Virtual-hosted style for
    // dot-free bucket names, path style otherwise.
    if (!absl::StrContains(bucket, ".")) {
      std::string url = absl::StrFormat("https:
      return PromiseFuturePair<S3EndpointRegion>::Link(
                 ResolveHost<S3VirtualHostFormatter>{
                     std::move(bucket), {}, S3VirtualHostFormatter{}},
                 transport->IssueRequest(
                     HttpRequestBuilder("HEAD", std::move(url))
                         .AddHostHeader(host_header)
                         .BuildRequest(),
                     {}))
          .future;
    }
    std::string url =
        absl::StrFormat("https:
    return PromiseFuturePair<S3EndpointRegion>::Link(
               ResolveHost<S3PathFormatter>{
                   std::move(bucket), {}, S3PathFormatter{}},
               transport->IssueRequest(
                   HttpRequestBuilder("HEAD", std ::move(url))
                       .AddHostHeader(host_header)
                       .BuildRequest(),
                   {}))
        .future;
  }
  // Custom endpoint: probe it, defaulting to us-east-1 when the server
  // returns no region header.
  std::string url = absl::StrFormat("%s/%s", endpoint, bucket);
  return PromiseFuturePair<S3EndpointRegion>::Link(
             ResolveHost<S3CustomFormatter>{
                 std::move(bucket), "us-east-1",
                 S3CustomFormatter{std::string(endpoint)}},
             transport->IssueRequest(HttpRequestBuilder("HEAD", std::move(url))
                                         .AddHostHeader(host_header)
                                         .BuildRequest(),
                                     {}))
      .future;
}
}
} | #include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::ResolveEndpointRegion;
using ::tensorstore::internal_kvstore_s3::S3EndpointRegion;
using ::tensorstore::internal_kvstore_s3::ValidateEndpoint;
namespace {
// Covers ValidateEndpoint's variant outcomes: deferred resolution (Ok),
// immediate S3EndpointRegion, and invalid-argument errors.
// NOTE(review): several URL literals below are truncated after "http:"
// (scrubbing damage) — restore before compiling.
TEST(ValidateEndpointTest, Basic) {
  // Region unknown -> resolution deferred (OkStatus in the variant).
  EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, {}),
              ::testing::VariantWith<absl::Status>(absl::OkStatus()));
  EXPECT_THAT(ValidateEndpoint("test.bucket", {}, {}, {}),
              ::testing::VariantWith<absl::Status>(absl::OkStatus()));
  // Region known -> resolved endpoint.
  EXPECT_THAT(ValidateEndpoint("testbucket", "us-east-1", {}, {}),
              ::testing::VariantWith<S3EndpointRegion>(testing::_));
  // Legacy (uppercase) bucket names imply us-east-1.
  EXPECT_THAT(ValidateEndpoint("OldBucket", "us-east-1", {}, {}),
              ::testing::VariantWith<S3EndpointRegion>(testing::_));
  EXPECT_THAT(ValidateEndpoint("OldBucket", {}, {}, {}),
              ::testing::VariantWith<S3EndpointRegion>(testing::_));
  // ...and any other region is rejected.
  EXPECT_THAT(ValidateEndpoint("OldBucket", "us-west-1", {}, {}),
              ::testing::VariantWith<absl::Status>(
                  tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
  EXPECT_THAT(ValidateEndpoint("testbucket", "region", "http:
              ::testing::VariantWith<S3EndpointRegion>(
                  S3EndpointRegion{"http:
  EXPECT_THAT(
      ValidateEndpoint("testbucket", "region", "http:
      ::testing::VariantWith<S3EndpointRegion>(
          S3EndpointRegion{"http:
  // host_header without endpoint is invalid.
  EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, "my.header"),
              ::testing::VariantWith<absl::Status>(
                  tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
}
// Drives ResolveEndpointRegion against a mock transport that answers the
// HEAD probe with an x-amz-bucket-region header, for virtual-hosted,
// path-style, and custom-endpoint cases.
// NOTE(review): the URL map keys and expected endpoints below are
// truncated after "http(s):" (scrubbing damage) — restore before
// compiling.
TEST(ResolveEndpointRegion, Basic) {
  absl::flat_hash_map<std::string, HttpResponse> url_to_response{
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"HEAD https:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
      {"HEAD http:
       HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(url_to_response);
  S3EndpointRegion ehr;
  // Virtual-hosted style (no '.' in bucket name).
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      ehr,
      ResolveEndpointRegion("testbucket", {}, {}, mock_transport).result());
  EXPECT_THAT(ehr.endpoint, "https:
  EXPECT_THAT(ehr.aws_region, "us-east-1");
  // Path style ('.' in bucket name).
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      ehr,
      ResolveEndpointRegion("test.bucket", {}, {}, mock_transport).result());
  EXPECT_THAT(ehr.endpoint, "https:
  EXPECT_THAT(ehr.aws_region, "us-east-1");
  // Custom endpoint.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      ehr, ResolveEndpointRegion("test.bucket", "http:
                                 mock_transport)
               .result());
  EXPECT_THAT(ehr.endpoint, "http:
  EXPECT_THAT(ehr.aws_region, "us-east-1");
  // Custom endpoint with an explicit host header.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      ehr, ResolveEndpointRegion("test.bucket", "http:
                                 "s3.localhost.com", mock_transport)
               .result());
  EXPECT_THAT(ehr.endpoint, "http:
  EXPECT_THAT(ehr.aws_region, "us-east-1");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_endpoint.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_endpoint_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b9fa1a4d-c983-4a54-ac81-e55fce5a564f | cpp | google/tensorstore | default_credential_provider | tensorstore/kvstore/s3/credentials/default_credential_provider.cc | tensorstore/kvstore/s3/credentials/default_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include <algorithm>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/functional/function_ref.h"
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
// Process-wide registry of externally registered credential-provider
// factories, kept sorted by ascending priority; guarded by `mutex`.
struct AwsCredentialProviderRegistry {
  std::vector<std::pair<int, AwsCredentialProviderFn>> providers;
  absl::Mutex mutex;
};
// Returns the singleton registry. absl::NoDestructor avoids
// static-destruction-order problems at process shutdown.
AwsCredentialProviderRegistry& GetAwsProviderRegistry() {
  static absl::NoDestructor<AwsCredentialProviderRegistry> registry;
  return *registry;
}
}
// Registers `provider` at the given priority (lower priority values are
// consulted first) and re-sorts the registry so lookup can scan in order.
void RegisterAwsCredentialProviderProvider(AwsCredentialProviderFn provider,
                                           int priority) {
  auto& registry = GetAwsProviderRegistry();
  absl::WriterMutexLock hold(&registry.mutex);
  registry.providers.emplace_back(priority, std::move(provider));
  std::sort(
      registry.providers.begin(), registry.providers.end(),
      [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; });
}
/// Returns a credential provider: the first registered factory whose
/// invocation succeeds wins; otherwise a DefaultAwsCredentialsProvider is
/// built from the supplied filename/profile/metadata endpoint/transport.
Result<std::unique_ptr<AwsCredentialProvider>> GetAwsCredentialProvider(
    std::string_view filename, std::string_view profile,
    std::string_view metadata_endpoint,
    std::shared_ptr<internal_http::HttpTransport> transport) {
  auto& registry = GetAwsProviderRegistry();
  absl::WriterMutexLock lock(&registry.mutex);
  // Registered providers are kept sorted by ascending priority.
  for (const auto& provider : registry.providers) {
    auto credentials = provider.second();
    if (credentials.ok()) return credentials;
  }
  // Fix: the Options were previously constructed from an undeclared
  // identifier (`unknown`) instead of the `filename` parameter, which both
  // failed to compile and silently dropped the caller's filename.
  return std::make_unique<DefaultAwsCredentialsProvider>(
      DefaultAwsCredentialsProvider::Options{
          std::string{filename}, std::string{profile},
          std::string{metadata_endpoint}, transport});
}
// Constructs the provider. `credentials_` starts with expires_at at
// InfinitePast so the first GetCredentials() call always performs a real
// lookup rather than serving the empty cached value.
DefaultAwsCredentialsProvider::DefaultAwsCredentialsProvider(
    Options options, absl::FunctionRef<absl::Time()> clock)
    : options_(std::move(options)),
      clock_(clock),
      credentials_{{}, {}, {}, absl::InfinitePast()} {}
// Returns cached credentials while unexpired; otherwise re-queries the
// previously selected provider, and failing that walks the fallback
// chain (environment -> file/profile -> EC2 metadata) as constrained by
// the configured options. Falls back to anonymous credentials when every
// source fails.
Result<AwsCredentials> DefaultAwsCredentialsProvider::GetCredentials() {
  // Fast path: serve the cached value under a shared lock.
  {
    absl::ReaderMutexLock lock(&mutex_);
    if (credentials_.expires_at > clock_()) {
      return credentials_;
    }
  }
  absl::WriterMutexLock lock(&mutex_);
  // Refresh from the provider chosen on a previous call, if any.
  if (provider_) {
    auto credentials_result = provider_->GetCredentials();
    if (credentials_result.ok()) {
      credentials_ = credentials_result.value();
      return credentials_;
    }
  }
  // With no explicit options every source is tried; otherwise only the
  // sources matching the supplied options are consulted.
  bool only_default_options = options_.filename.empty() &&
                              options_.profile.empty() &&
                              options_.endpoint.empty();
  // 1. Environment variables.
  if (only_default_options) {
    provider_ = std::make_unique<EnvironmentCredentialProvider>();
    if (auto credentials_result = provider_->GetCredentials();
        credentials_result.ok()) {
      credentials_ = std::move(credentials_result).value();
      return credentials_;
    } else if (s3_logging) {
      ABSL_LOG_FIRST_N(INFO, 1)
          << "Could not acquire credentials from environment: "
          << credentials_result.status();
    }
  }
  // 2. Shared credentials file / profile.
  if (only_default_options || !options_.filename.empty() ||
      !options_.profile.empty()) {
    provider_ = std::make_unique<FileCredentialProvider>(options_.filename,
                                                         options_.profile);
    if (auto credentials_result = provider_->GetCredentials();
        credentials_result.ok()) {
      credentials_ = std::move(credentials_result).value();
      return credentials_;
    } else if (s3_logging) {
      ABSL_LOG_FIRST_N(INFO, 1)
          << "Could not acquire credentials from file/profile: "
          << credentials_result.status();
    }
  }
  // 3. EC2 metadata server.
  if (only_default_options || !options_.endpoint.empty()) {
    provider_ = std::make_unique<EC2MetadataCredentialProvider>(
        options_.endpoint, options_.transport);
    if (auto credentials_result = provider_->GetCredentials();
        credentials_result.ok()) {
      credentials_ = std::move(credentials_result).value();
      return credentials_;
    } else if (s3_logging) {
      ABSL_LOG(INFO)
          << "Could not acquire credentials from EC2 Metadata Server "
          << options_.endpoint << ": " << credentials_result.status();
    }
  }
  // Everything failed: cache anonymous credentials (never expire).
  provider_ = nullptr;
  credentials_ = AwsCredentials::Anonymous();
  return credentials_;
}
}
} | #include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultAwsCredentialsProvider;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using Options =
::tensorstore::internal_kvstore_s3::DefaultAwsCredentialsProvider::Options;
static constexpr char kEndpoint[] = "http:
// Creates a scoped temporary directory and writes an INI-style AWS
// credentials file (single "alice" profile) into it.
class CredentialFileFactory
    : public tensorstore::internal_testing::ScopedTemporaryDirectory {
 public:
  // Writes the credentials file and returns its full path.
  std::string WriteCredentialsFile() {
    auto filename = JoinPath(path(), "aws_config");
    std::ofstream out(filename);
    out << "[alice]\n"
           "aws_access_key_id = AKIAIOSFODNN6EXAMPLE\n"
           "aws_secret_access_key = "
           "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY\n"
           "aws_session_token = abcdef1234567890\n"
           "\n";
    out.close();
    return filename;
  }
};
// Fixture that scrubs AWS credential environment variables before each
// test so the provider cannot pick up ambient credentials.
class DefaultCredentialProviderTest : public ::testing::Test {
 protected:
  void SetUp() override {
    for (const char* var : {"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY",
                            "AWS_SESSION_TOKEN"}) {
      UnsetEnv(var);
    }
  }
};
// With no configuration and a transport that answers nothing, the
// provider must fall back to anonymous credentials — on the first fetch
// and again on the cached second fetch.
TEST_F(DefaultCredentialProviderTest, AnonymousCredentials) {
  auto transport = std::make_shared<DefaultMockHttpTransport>(
      absl::flat_hash_map<std::string, HttpResponse>());
  auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
      Options{{}, {}, {}, transport});

  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto first, provider->GetCredentials());
  EXPECT_TRUE(first.IsAnonymous());
  EXPECT_EQ(first.expires_at, absl::InfiniteFuture());

  // Second call exercises the cached path.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto second, provider->GetCredentials());
  EXPECT_TRUE(second.IsAnonymous());
  EXPECT_EQ(second.expires_at, absl::InfiniteFuture());
}
// Credentials sourced from the environment must be stable across
// repeated GetCredentials() calls.
TEST_F(DefaultCredentialProviderTest, EnvironmentCredentialIdempotency) {
  SetEnv("AWS_ACCESS_KEY_ID", "access");
  SetEnv("AWS_SECRET_ACCESS_KEY", "secret");
  SetEnv("AWS_SESSION_TOKEN", "token");

  auto provider = std::make_unique<DefaultAwsCredentialsProvider>();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto first, provider->GetCredentials());
  EXPECT_EQ(first.access_key, "access");
  EXPECT_EQ(first.secret_key, "secret");
  EXPECT_EQ(first.session_token, "token");
  EXPECT_EQ(first.expires_at, absl::InfiniteFuture());

  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto second, provider->GetCredentials());
  EXPECT_EQ(first.access_key, second.access_key);
  EXPECT_EQ(first.secret_key, second.secret_key);
  EXPECT_EQ(first.session_token, second.session_token);
  EXPECT_EQ(first.expires_at, second.expires_at);
}
// An explicit filename + profile in Options routes credential lookup to
// the file provider; repeated fetches return identical values.
TEST_F(DefaultCredentialProviderTest, ConfigureFileProviderFromOptions) {
  auto factory = CredentialFileFactory{};
  auto credentials_file = factory.WriteCredentialsFile();
  auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
      Options{credentials_file, "alice"});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
                                   provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
  EXPECT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
  EXPECT_EQ(credentials.session_token, "abcdef1234567890");
  EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
  // Second fetch must be idempotent.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials2,
                                   provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, credentials2.access_key);
  EXPECT_EQ(credentials.secret_key, credentials2.secret_key);
  EXPECT_EQ(credentials.session_token, credentials2.session_token);
  EXPECT_EQ(credentials.expires_at, credentials2.expires_at);
}
// An explicit metadata endpoint routes lookup to the EC2 metadata
// provider; verifies caching before expiry, refresh after expiry, and the
// fallback to anonymous credentials when the server starts failing.
// NOTE(review): the "POST http:" map keys below are truncated
// (URL-scrubbing damage) — restore the full token-request URLs before
// compiling.
TEST_F(DefaultCredentialProviderTest, ConfigureEC2ProviderFromOptions) {
  auto now = absl::Now();
  // Frozen clock so expiry behavior is controlled by the test.
  auto stuck_clock = [&]() -> absl::Time { return now; };
  auto expiry = now + absl::Seconds(200);
  auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kEndpoint, "1234", "ASIA1234567890",
                             "1234567890abcdef", "token", expiry));
  auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
      Options{{}, {}, kEndpoint, mock_transport}, stuck_clock);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
                                   provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, "ASIA1234567890");
  EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
  EXPECT_EQ(credentials.session_token, "token");
  // Expiry is reported 60s early to allow for refresh latency.
  EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
  // Server failure before expiry: cached credentials are still served.
  mock_transport->Reset(absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http:
       HttpResponse{404, absl::Cord{""}}},
  });
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, "ASIA1234567890");
  EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
  EXPECT_EQ(credentials.session_token, "token");
  EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
  // Advance past expiry: a working server yields refreshed credentials.
  now += absl::Seconds(300);
  mock_transport->Reset(
      DefaultEC2MetadataFlow(kEndpoint, "1234", "ASIA1234567890",
                             "1234567890abcdef", "TOKEN", expiry));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, "ASIA1234567890");
  EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
  EXPECT_EQ(credentials.session_token, "TOKEN");
  EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
  // Server failure after expiry: provider degrades to anonymous.
  mock_transport->Reset(absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http:
       HttpResponse{404, absl::Cord{""}}},
  });
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
  EXPECT_EQ(credentials.access_key, "");
  EXPECT_EQ(credentials.secret_key, "");
  EXPECT_EQ(credentials.session_token, "");
  EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/default_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/default_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a3f54d00-50ef-4c35-82d3-1ecbc16f183e | cpp | google/tensorstore | environment_credential_provider | tensorstore/kvstore/s3/credentials/environment_credential_provider.cc | tensorstore/kvstore/s3/credentials/environment_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
using ::tensorstore::internal::GetEnv;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kEnvAwsAccessKeyId[] = "AWS_ACCESS_KEY_ID";
static constexpr char kEnvAwsSecretAccessKey[] = "AWS_SECRET_ACCESS_KEY";
static constexpr char kEnvAwsSessionToken[] = "AWS_SESSION_TOKEN";
}
// Reads credentials from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY and,
// when present, AWS_SESSION_TOKEN. Returns NotFound when either required
// variable is unset; environment credentials never expire.
Result<AwsCredentials> EnvironmentCredentialProvider::GetCredentials() {
  const auto maybe_access = GetEnv(kEnvAwsAccessKeyId);
  if (!maybe_access) {
    return absl::NotFoundError(absl::StrCat(kEnvAwsAccessKeyId, " not set"));
  }
  const auto maybe_secret = GetEnv(kEnvAwsSecretAccessKey);
  if (!maybe_secret) {
    return absl::NotFoundError(
        absl::StrCat(kEnvAwsSecretAccessKey, " not set"));
  }
  ABSL_LOG_FIRST_N(INFO, 1)
      << "Using Environment Variable " << kEnvAwsAccessKeyId;
  AwsCredentials result{*maybe_access, *maybe_secret};
  if (const auto token = GetEnv(kEnvAwsSessionToken)) {
    result.session_token = *token;
  }
  result.expires_at = absl::InfiniteFuture();
  return result;
}
}
} | #include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include <gtest/gtest.h>
#include "tensorstore/internal/env.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_kvstore_s3::EnvironmentCredentialProvider;
// Fixture that scrubs all AWS-related environment variables before each
// test so results never depend on the ambient environment.
class EnvironmentCredentialProviderTest : public ::testing::Test {
 protected:
  void SetUp() override {
    for (const char* var :
         {"AWS_SHARED_CREDENTIALS_FILE", "AWS_ACCESS_KEY_ID",
          "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "AWS_PROFILE"}) {
      UnsetEnv(var);
    }
  }
};
#ifndef _WIN32
// Without any credential env vars the provider fails; once
// AWS_ACCESS_KEY_ID is set it succeeds, even with an empty secret.
// (POSIX-only: the enclosing #ifndef guards Windows, where empty
// environment values behave differently.)
TEST_F(EnvironmentCredentialProviderTest, ProviderNoCredentials) {
  auto provider = EnvironmentCredentialProvider();
  ASSERT_FALSE(provider.GetCredentials().ok());
  SetEnv("AWS_ACCESS_KEY_ID", "foo");
  SetEnv("AWS_SECRET_ACCESS_KEY", "");
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
  ASSERT_EQ(credentials.access_key, "foo");
  ASSERT_TRUE(credentials.secret_key.empty());
  ASSERT_TRUE(credentials.session_token.empty());
}
#endif
// All three env vars set: the provider surfaces each value verbatim.
TEST_F(EnvironmentCredentialProviderTest, ProviderAwsCredentialsFromEnv) {
  SetEnv("AWS_ACCESS_KEY_ID", "foo");
  SetEnv("AWS_SECRET_ACCESS_KEY", "bar");
  SetEnv("AWS_SESSION_TOKEN", "qux");
  auto provider = EnvironmentCredentialProvider();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
  ASSERT_EQ(credentials.access_key, "foo");
  ASSERT_EQ(credentials.secret_key, "bar");
  ASSERT_EQ(credentials.session_token, "qux");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/environment_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/environment_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
17337708-4d38-49da-ac30-9296214bfc5a | cpp | google/tensorstore | file_credential_provider | tensorstore/kvstore/s3/credentials/file_credential_provider.cc | tensorstore/kvstore/s3/credentials/file_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/lines/line_reading.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
static constexpr char kEnvAwsCredentialsFile[] = "AWS_SHARED_CREDENTIALS_FILE";
static constexpr char kDefaultAwsCredentialsFilePath[] = ".aws/credentials";
static constexpr char kCfgAwsAccessKeyId[] = "aws_access_key_id";
static constexpr char kCfgAwsSecretAccessKeyId[] = "aws_secret_access_key";
static constexpr char kCfgAwsSessionToken[] = "aws_session_token";
static constexpr char kEnvAwsProfile[] = "AWS_PROFILE";
static constexpr char kDefaultProfile[] = "default";
// Resolves the credentials file path: $AWS_SHARED_CREDENTIALS_FILE when
// set, otherwise $HOME/.aws/credentials; nullopt when neither source is
// available.
std::optional<std::string> GetAwsCredentialsFileName() {
  if (auto from_env = GetEnv(kEnvAwsCredentialsFile)) {
    return from_env;
  }
  if (auto home_dir = GetEnv("HOME")) {
    return JoinPath(*home_dir, kDefaultAwsCredentialsFilePath);
  }
  return std::nullopt;
}
}
// Constructs the provider, filling in defaults: the filename from the
// environment/HOME when not given, and the profile from $AWS_PROFILE
// (falling back to "default") when not given.
FileCredentialProvider::FileCredentialProvider(std::string_view filename,
                                               std::string_view profile)
    : filename_(filename), profile_(profile) {
  if (filename_.empty()) {
    if (auto resolved = GetAwsCredentialsFileName()) {
      filename_ = *std::move(resolved);
    }
  }
  if (profile_.empty()) {
    profile_ = GetEnv(kEnvAwsProfile).value_or(kDefaultProfile);
  }
}
// Parses the INI-style shared credentials file and returns the key/secret
// (and optional session token) from the configured profile section.
// Returns NotFound when the file or profile is missing; file-sourced
// credentials never expire.
Result<AwsCredentials> FileCredentialProvider::GetCredentials() {
  if (filename_.empty()) {
    return absl::NotFoundError("No credentials file specified");
  }
  riegeli::FdReader reader(filename_);
  if (!reader.ok()) {
    return absl::NotFoundError(
        absl::StrFormat("Could not open credentials file [%s]", filename_));
  }
  AwsCredentials credentials{};
  std::string_view line;
  bool profile_found = false;
  while (riegeli::ReadLine(reader, line)) {
    auto sline = absl::StripAsciiWhitespace(line);
    // Skip blank lines and comments.
    if (sline.empty() || sline[0] == '#') continue;
    // "[section]" header line.
    if (sline[0] == '[' && sline[sline.size() - 1] == ']') {
      // Already collected the target profile; a new section ends it.
      if (profile_found) break;
      auto section_name =
          absl::StripAsciiWhitespace(sline.substr(1, sline.size() - 2));
      ABSL_LOG_IF(INFO, s3_logging) << "Found section name [" << section_name
                                    << "] in file [" << filename_ << "]";
      profile_found = (section_name == profile_);
      continue;
    }
    // "key = value" lines inside the target profile; split on the first
    // '=' only so values may contain '='.
    if (profile_found) {
      std::pair<std::string_view, std::string_view> kv =
          absl::StrSplit(sline, absl::MaxSplits('=', 1));
      kv.first = absl::StripAsciiWhitespace(kv.first);
      kv.second = absl::StripAsciiWhitespace(kv.second);
      if (kv.first == kCfgAwsAccessKeyId) {
        credentials.access_key = kv.second;
      } else if (kv.first == kCfgAwsSecretAccessKeyId) {
        credentials.secret_key = kv.second;
      } else if (kv.first == kCfgAwsSessionToken) {
        credentials.session_token = kv.second;
      }
    }
  }
  if (!profile_found) {
    return absl::NotFoundError(
        absl::StrFormat("Profile [%s] not found in credentials file [%s]",
                        profile_, filename_));
  }
  ABSL_LOG_FIRST_N(INFO, 1)
      << "Using profile [" << profile_ << "] in file [" << filename_ << "]";
  credentials.expires_at = absl::InfiniteFuture();
  return credentials;
}
}
} | #include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_kvstore_s3::FileCredentialProvider;
// Writes a credentials file with two profiles ("default" and "alice")
// into a scoped temporary directory. The fixture deliberately includes a
// key/value line before any section, irregular '=' spacing, and trailing
// whitespace to exercise the parser's tolerance.
class TestData
    : public tensorstore::internal_testing::ScopedTemporaryDirectory {
 public:
  std::string WriteCredentialsFile() {
    auto p = JoinPath(path(), "aws_config");
    std::ofstream ofs(p);
    ofs << "discarded_value = 500\n"
           "\n"
           "[default]\n"
           "aws_access_key_id =AKIAIOSFODNN7EXAMPLE\n"
           "aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n"
           "aws_session_token= abcdef1234567890 \n"
           "\n"
           "[alice]\n"
           "aws_access_key_id = AKIAIOSFODNN6EXAMPLE\n"
           "aws_secret_access_key = "
           "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY\n"
           "\n";
    ofs.close();
    return p;
  }
};
// Fixture that clears the env vars influencing file/profile resolution
// so each test controls them explicitly.
class FileCredentialProviderTest : public ::testing::Test {
 protected:
  void SetUp() override {
    for (const char* var : {"AWS_SHARED_CREDENTIALS_FILE", "AWS_PROFILE"}) {
      UnsetEnv(var);
    }
  }
};
// With only AWS_SHARED_CREDENTIALS_FILE set, the "default" profile is
// read; values are whitespace-trimmed.
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileDefault) {
  TestData test_data;
  std::string credentials_filename = test_data.WriteCredentialsFile();
  SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
  auto provider = FileCredentialProvider("", "");
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
  ASSERT_EQ(provider.GetFileName(), credentials_filename);
  ASSERT_EQ(provider.GetProfile(), "default");
  ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
  ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
  ASSERT_EQ(credentials.session_token, "abcdef1234567890");
  ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
// A profile passed explicitly to the constructor overrides the default.
TEST_F(FileCredentialProviderTest,
       ProviderAwsCredentialsFromFileProfileOverride) {
  TestData test_data;
  auto credentials_filename = test_data.WriteCredentialsFile();
  SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
  auto provider = FileCredentialProvider("", "alice");
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
  ASSERT_EQ(provider.GetFileName(), credentials_filename);
  ASSERT_EQ(provider.GetProfile(), "alice");
  ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
  ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
  // The "alice" profile has no session token.
  ASSERT_EQ(credentials.session_token, "");
  ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
// The AWS_PROFILE environment variable selects the profile when no explicit
// profile argument is given.
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileProfileEnv) {
  TestData test_data;
  std::string credentials_file = test_data.WriteCredentialsFile();
  SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_file.c_str());
  SetEnv("AWS_PROFILE", "alice");
  FileCredentialProvider provider("", "");
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
  ASSERT_EQ(provider.GetFileName(), credentials_file);
  ASSERT_EQ(provider.GetProfile(), "alice");
  ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
  ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
  ASSERT_EQ(credentials.session_token, "");
  ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
// A profile from AWS_PROFILE that does not exist in the file yields an error,
// but the provider still reports the file and profile it attempted to use.
TEST_F(FileCredentialProviderTest,
       ProviderAwsCredentialsFromFileInvalidProfileEnv) {
  TestData test_data;
  std::string credentials_file = test_data.WriteCredentialsFile();
  SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_file.c_str());
  SetEnv("AWS_PROFILE", "bob");
  FileCredentialProvider provider("", "");
  ASSERT_FALSE(provider.GetCredentials().ok());
  ASSERT_EQ(provider.GetFileName(), credentials_file);
  ASSERT_EQ(provider.GetProfile(), "bob");
}
// An explicit filename argument bypasses the environment entirely; the
// profile argument is honored per provider instance.
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileOverride) {
  TestData test_data;
  std::string credentials_file = test_data.WriteCredentialsFile();
  {
    // Default profile.
    FileCredentialProvider provider(credentials_file, "");
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
                                     provider.GetCredentials());
    ASSERT_EQ(provider.GetFileName(), credentials_file);
    ASSERT_EQ(provider.GetProfile(), "default");
    ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
    ASSERT_EQ(credentials.secret_key,
              "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
    ASSERT_EQ(credentials.session_token, "abcdef1234567890");
    ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
  }
  {
    // Explicit [alice] profile.
    FileCredentialProvider provider(credentials_file, "alice");
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
                                     provider.GetCredentials());
    ASSERT_EQ(provider.GetFileName(), credentials_file);
    ASSERT_EQ(provider.GetProfile(), "alice");
    ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
    ASSERT_EQ(credentials.secret_key,
              "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
    ASSERT_EQ(credentials.session_token, "");
    ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/file_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/file_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a7970c6c-3eda-4e9f-9136-c7919de2cdd1 | cpp | google/tensorstore | ec2_credential_provider | tensorstore/kvstore/s3/credentials/ec2_credential_provider.cc | tensorstore/kvstore/s3/credentials/ec2_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
// Optional override of the EC2 instance metadata service (IMDS) endpoint;
// takes precedence over the AWS_EC2_METADATA_SERVICE_ENDPOINT environment
// variable.  Fix: the help string was ungrammatical ("Endpoint to used for
// http access AWS metadata service").
ABSL_FLAG(std::optional<std::string>,
          tensorstore_aws_ec2_metadata_service_endpoint, std::nullopt,
          "Endpoint used for http access to the AWS metadata service. "
          "Overrides AWS_EC2_METADATA_SERVICE_ENDPOINT.");
using ::tensorstore::Result;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_http::HttpRequestBuilder;
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
// Header prefix used to attach the IMDSv2 session token to requests.
static constexpr char kMetadataTokenHeader[] = "x-aws-ec2-metadata-token:";
// IMDS path listing/serving IAM role security credentials.
static constexpr char kIamCredentialsPath[] =
    "/latest/meta-data/iam/security-credentials/";
// Short connect timeout: the metadata service is link-local, so failure to
// connect quickly means we are not running on EC2.
static constexpr absl::Duration kConnectTimeout = absl::Milliseconds(200);
// Fallback credential lifetime used when the response has no "Expiration".
static constexpr absl::Duration kDefaultTimeout = absl::Minutes(5);
// Expected value of the "Code" field in a successful credentials response.
static constexpr char kSuccess[] = "Success";
// Returns the EC2 metadata service endpoint: the
// --tensorstore_aws_ec2_metadata_service_endpoint flag if set, else the
// AWS_EC2_METADATA_SERVICE_ENDPOINT environment variable, else the
// well-known IMDS link-local address.
// Fix: the default-endpoint string literal was truncated at "http:" by a
// comment-stripping pass that also ate the "//" inside the URL.
std::string GetEC2MetadataServiceEndpoint() {
  return GetFlagOrEnvValue(FLAGS_tensorstore_aws_ec2_metadata_service_endpoint,
                           "AWS_EC2_METADATA_SERVICE_ENDPOINT")
      .value_or("http://169.254.169.254");
}
// Parsed form of the JSON document served by the IMDS
// security-credentials endpoint.  Only "Code" is required; every other
// field is optional in the wire format.
struct EC2CredentialsResponse {
  std::string code;  // "Success" on success; anything else is a failure.
  std::optional<absl::Time> last_updated;
  std::optional<std::string> type;
  std::optional<std::string> access_key_id;
  std::optional<std::string> secret_access_key;
  std::optional<std::string> token;
  std::optional<absl::Time> expiration;  // When the credentials expire.
};
// JSON binder mapping the IMDS response fields onto EC2CredentialsResponse;
// OptionalMember tolerates fields absent from the response.
inline constexpr auto EC2CredentialsResponseBinder = jb::Object(
    jb::Member("Code", jb::Projection(&EC2CredentialsResponse::code)),
    jb::OptionalMember("LastUpdated",
                       jb::Projection(&EC2CredentialsResponse::last_updated)),
    jb::OptionalMember("Type", jb::Projection(&EC2CredentialsResponse::type)),
    jb::OptionalMember("AccessKeyId",
                       jb::Projection(&EC2CredentialsResponse::access_key_id)),
    jb::OptionalMember(
        "SecretAccessKey",
        jb::Projection(&EC2CredentialsResponse::secret_access_key)),
    jb::OptionalMember("Token", jb::Projection(&EC2CredentialsResponse::token)),
    jb::OptionalMember("Expiration",
                       jb::Projection(&EC2CredentialsResponse::expiration)));
// Obtains an IMDSv2 session token from `endpoint`.  Tries POST first and
// falls back to PUT when POST is rejected with 405/401; any other failure
// from either attempt is returned as an error.
// NOTE(review): the IMDSv2 token API is documented as PUT; the POST-first
// order here presumably accommodates a proxy or mock — confirm upstream.
Result<absl::Cord> GetEC2ApiToken(std::string_view endpoint,
                                  internal_http::HttpTransport& transport) {
  const std::string token_url =
      tensorstore::StrCat(endpoint, "/latest/api/token");
  // Request a token valid for 6 hours (21600 seconds).
  const std::string request_header =
      "x-aws-ec2-metadata-token-ttl-seconds: 21600";
  const auto request_options = internal_http::IssueRequestOptions()
                                   .SetRequestTimeout(absl::InfiniteDuration())
                                   .SetConnectTimeout(kConnectTimeout);
  for (auto method : {std::string_view("POST"), std::string_view("PUT")}) {
    auto token_request = HttpRequestBuilder(method, token_url)
                             .AddHeader(request_header)
                             .BuildRequest();
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto token_response,
        transport.IssueRequest(token_request, request_options).result());
    // 405 (method not allowed) / 401 on POST: retry with PUT.
    if (method == "POST" && (token_response.status_code == 405 ||
                             token_response.status_code == 401)) {
      continue;
    }
    bool is_retryable = false;
    TENSORSTORE_RETURN_IF_ERROR(
        AwsHttpResponseToStatus(token_response, is_retryable));
    // Success: the response payload is the session token.
    return std::move(token_response.payload);
  }
  return absl::NotFoundError(
      "Failed to obtain EC2 API token from either IMDSv1 or IMDSv2");
}
}
// Retrieves AWS credentials from the EC2 instance metadata service:
//   1. resolve the endpoint (flag/env/default) on first use,
//   2. obtain an IMDSv2 session token,
//   3. list the instance's IAM roles and take the first one,
//   4. fetch and parse that role's security credentials.
// Returns NotFound if no role is attached or the response "Code" is not
// "Success".
Result<AwsCredentials> EC2MetadataCredentialProvider::GetCredentials() {
  // Lazily resolve and cache the endpoint for this provider instance.
  if (endpoint_.empty()) {
    endpoint_ = GetEC2MetadataServiceEndpoint();
  }
  TENSORSTORE_ASSIGN_OR_RETURN(auto api_token,
                               GetEC2ApiToken(endpoint_, *transport_));
  // All subsequent requests carry the IMDSv2 session token header.
  auto token_header = tensorstore::StrCat(kMetadataTokenHeader, api_token);
  auto iam_role_request =
      HttpRequestBuilder("GET",
                         tensorstore::StrCat(endpoint_, kIamCredentialsPath))
          .AddHeader(token_header)
          .BuildRequest();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_role_response,
      transport_->IssueRequest(iam_role_request, {}).result());
  auto iam_role_plain_text = iam_role_response.payload.Flatten();
  bool is_retryable = false;
  TENSORSTORE_RETURN_IF_ERROR(
      AwsHttpResponseToStatus(iam_role_response, is_retryable));
  // The role listing is newline-delimited; blank lines are ignored.
  std::vector<std::string_view> iam_roles =
      absl::StrSplit(iam_role_plain_text, '\n', absl::SkipWhitespace());
  if (iam_roles.empty()) {
    return absl::NotFoundError("Empty EC2 Role list");
  }
  // Only the first listed role is used.
  auto iam_credentials_request_url =
      tensorstore::StrCat(endpoint_, kIamCredentialsPath, iam_roles[0]);
  auto iam_credentials_request =
      HttpRequestBuilder("GET", iam_credentials_request_url)
          .AddHeader(token_header)
          .BuildRequest();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_credentials_response,
      transport_->IssueRequest(iam_credentials_request, {}).result());
  auto iam_credentials_plain_text = iam_credentials_response.payload.Flatten();
  TENSORSTORE_RETURN_IF_ERROR(
      AwsHttpResponseToStatus(iam_credentials_response, is_retryable));
  auto json_credentials = ParseJson(iam_credentials_plain_text);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_credentials,
      jb::FromJson<EC2CredentialsResponse>(json_credentials,
                                           EC2CredentialsResponseBinder));
  if (iam_credentials.code != kSuccess) {
    return absl::NotFoundError(
        absl::StrCat("EC2Metadata request to [", iam_credentials_request_url,
                     "] failed with code ", iam_credentials.code));
  }
  // Expire 60s early so callers refresh before the server-side expiry; use
  // kDefaultTimeout when the response carries no expiration.
  auto default_timeout = absl::Now() + kDefaultTimeout;
  auto expires_at =
      iam_credentials.expiration.value_or(default_timeout) - absl::Seconds(60);
  return AwsCredentials{iam_credentials.access_key_id.value_or(""),
                        iam_credentials.secret_access_key.value_or(""),
                        iam_credentials.token.value_or(""), expires_at};
}
}
} | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using ::tensorstore::internal_kvstore_s3::EC2MetadataCredentialProvider;
// Test constants.  Fix: both endpoint literals were truncated at "http:" by
// the comment-stripping pass; kDefaultEndpoint is the standard IMDS
// link-local address, kCustomEndpoint is an arbitrary non-default value used
// by the override tests (exact original spelling unverifiable — any value
// consistent across the tests works).
static constexpr char kDefaultEndpoint[] = "http://169.254.169.254";
static constexpr char kCustomEndpoint[] = "http://custom.endpoint";
static constexpr char kApiToken[] = "1234567890";
static constexpr char kAccessKey[] = "ASIA1234567890";
static constexpr char kSecretKey[] = "1234567890abcdef";
static constexpr char kSessionToken[] = "abcdef123456790";
// Fixture clearing the endpoint-override environment variable so it cannot
// leak between tests.
class EC2MetadataCredentialProviderTest : public ::testing::Test {
 protected:
  void SetUp() override { UnsetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); }
};
// Exercises the full credential-retrieval flow against a mocked metadata
// server on the default endpoint.
TEST_F(EC2MetadataCredentialProviderTest, CredentialRetrievalFlow) {
  const auto expiry = absl::Now() + absl::Seconds(200);
  auto transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kDefaultEndpoint, kApiToken, kAccessKey,
                             kSecretKey, kSessionToken, expiry));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  // Credentials are reported as expiring 60 seconds early.
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// AWS_EC2_METADATA_SERVICE_ENDPOINT redirects the provider to a custom
// metadata server when no endpoint is passed explicitly.
TEST_F(EC2MetadataCredentialProviderTest, EnvironmentVariableMetadataServer) {
  SetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", kCustomEndpoint);
  const auto expiry = absl::Now() + absl::Seconds(200);
  auto transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
                             kSessionToken, expiry));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// An endpoint passed directly to the constructor takes effect without any
// environment configuration.
TEST_F(EC2MetadataCredentialProviderTest, InjectedMetadataServer) {
  const auto expiry = absl::Now() + absl::Seconds(200);
  auto transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
                             kSessionToken, expiry));
  auto provider = std::make_shared<EC2MetadataCredentialProvider>(
      kCustomEndpoint, transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// An empty role listing produces a "Empty EC2 Role list" error.
// Fix: the mock-URL map keys were truncated at "http:" by the
// comment-stripping pass; restored to the IMDS token and
// security-credentials endpoints.
TEST_F(EC2MetadataCredentialProviderTest, NoIamRolesInSecurityCredentials) {
  auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http://169.254.169.254/latest/api/token",
       HttpResponse{200, absl::Cord{kApiToken}}},
      {"GET http://169.254.169.254/latest/meta-data/iam/security-credentials/",
       HttpResponse{
           200, absl::Cord{""}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  ASSERT_FALSE(provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
  EXPECT_THAT(provider->GetCredentials().status().ToString(),
              ::testing::HasSubstr("Empty EC2 Role list"));
}
// A credentials document whose "Code" is not "Success" is reported as
// NotFound with the failing code in the message.
// Fix: the mock-URL map keys were truncated at "http:" by the
// comment-stripping pass; restored to the IMDS endpoints.
TEST_F(EC2MetadataCredentialProviderTest, UnsuccessfulJsonResponse) {
  auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http://169.254.169.254/latest/api/token",
       HttpResponse{200, absl::Cord{kApiToken}}},
      {"GET http://169.254.169.254/latest/meta-data/iam/",
       HttpResponse{
           200, absl::Cord{"info"}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
      {"GET http://169.254.169.254/latest/meta-data/iam/security-credentials/",
       HttpResponse{200,
                    absl::Cord{"mock-iam-role"},
                    {{"x-aws-ec2-metadata-token", kApiToken}}}},
      {"GET "
       "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
       "mock-iam-role",
       HttpResponse{200,
                    absl::Cord(R"({"Code": "EntirelyUnsuccessful"})"),
                    {{"x-aws-ec2-metadata-token", kApiToken}}}}};
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  auto credentials = provider->GetCredentials();
  EXPECT_THAT(credentials.status(), MatchesStatus(absl::StatusCode::kNotFound));
  EXPECT_THAT(credentials.status().ToString(),
              ::testing::AllOf(::testing::HasSubstr("EC2Metadata request"),
                               ::testing::HasSubstr("EntirelyUnsuccessful")));
}
// When POST on the token endpoint fails with 405 the provider retries with
// PUT; a 401 on the PUT surfaces as PermissionDenied.
// Fix: the mock-URL map keys were truncated at "http:" by the
// comment-stripping pass; restored to the IMDS token endpoint.
TEST_F(EC2MetadataCredentialProviderTest, IMDSv2AfterFailure) {
  auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http://169.254.169.254/latest/api/token",
       HttpResponse{405, absl::Cord()}},
      {"PUT http://169.254.169.254/latest/api/token",
       HttpResponse{401, absl::Cord{}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  auto credentials = provider->GetCredentials();
  EXPECT_THAT(credentials.status(),
              MatchesStatus(absl::StatusCode::kPermissionDenied));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/ec2_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/ec2_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
07d7b812-5975-4ca6-ad06-b484fe868faf | cpp | google/tensorstore | gcs_key_value_store | tensorstore/kvstore/gcs_http/gcs_key_value_store.cc | tensorstore/kvstore/gcs_http/gcs_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_provider.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/internal/retries_context_resource.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/gcs/gcs_resource.h"
#include "tensorstore/kvstore/gcs/validate.h"
#include "tensorstore/kvstore/gcs_http/gcs_resource.h"
#include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/http/byte_range_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
// Optional override for the GCS HTTP endpoint; takes precedence over the
// TENSORSTORE_GCS_HTTP_URL environment variable.
ABSL_FLAG(std::optional<std::string>, tensorstore_gcs_http_url, std::nullopt,
          "Url used for http access to google cloud storage. "
          "Overrides TENSORSTORE_GCS_HTTP_URL.");
// Optional override for the HTTP protocol version ("1"/"1.1"/"2"/"2.0").
// Fix: the help string was a verbatim copy of the URL flag's description
// ("Url to used for http access..."), which is wrong for a version flag.
ABSL_FLAG(std::optional<std::string>, tensorstore_gcs_http_version,
          std::nullopt,
          "Http version used for http access to google cloud storage. "
          "Overrides TENSORSTORE_GCS_HTTP_VERSION.");
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::NoRateLimiter;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_kvstore_gcs_http::GcsConcurrencyResource;
using ::tensorstore::internal_kvstore_gcs_http::GcsRateLimiterResource;
using ::tensorstore::internal_kvstore_gcs_http::ObjectMetadata;
using ::tensorstore::internal_kvstore_gcs_http::ParseObjectMetadata;
using ::tensorstore::internal_storage_gcs::GcsHttpResponseToStatus;
using ::tensorstore::internal_storage_gcs::GcsRequestRetries;
using ::tensorstore::internal_storage_gcs::GcsUserProjectResource;
using ::tensorstore::internal_storage_gcs::IsRetriable;
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
using ::tensorstore::internal_storage_gcs::IsValidStorageGeneration;
using ::tensorstore::kvstore::Key;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListOptions;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
namespace {
// URI scheme for kvstore URLs handled by this driver ("gs://bucket/path").
static constexpr char kUriScheme[] = "gs";
}  // namespace
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Metrics for the GCS driver: the common kvstore set plus a retry counter.
struct GcsMetrics : public internal_kvstore::CommonMetrics {
  internal_metrics::Counter<int64_t>& retries;
};
// Process-wide metric instances, registered once at static-init time.
auto gcs_metrics = []() -> GcsMetrics {
  return {
      TENSORSTORE_KVSTORE_COMMON_METRICS(gcs),
      TENSORSTORE_KVSTORE_COUNTER_IMPL(
          gcs, retries, "count of all retried requests (read/write/delete)")};
}();
// Verbose-logging flag; enable with --tensorstore_verbose_logging=gcs_http.
ABSL_CONST_INIT internal_log::VerboseFlag gcs_http_logging("gcs_http");
// Returns the base URL for GCS requests: the --tensorstore_gcs_http_url flag
// if set, else the TENSORSTORE_GCS_HTTP_URL environment variable, else the
// public GCS endpoint.
// Fix: the default-URL literal was truncated at "https:" by a
// comment-stripping pass; restored to the documented GCS request endpoint
// (TODO confirm against upstream — older revisions used
// "https://www.googleapis.com").
std::string GetGcsBaseUrl() {
  return GetFlagOrEnvValue(FLAGS_tensorstore_gcs_http_url,
                           "TENSORSTORE_GCS_HTTP_URL")
      .value_or("https://storage.googleapis.com");
}
// Maps the --tensorstore_gcs_http_version flag / TENSORSTORE_GCS_HTTP_VERSION
// environment variable onto an HttpVersion.  "1"/"1.1" -> HTTP/1,
// "2"/"2.0" -> HTTP/2 prior knowledge, unset -> transport default, any other
// value -> HTTP/2 over TLS.  The value is computed once and cached.
IssueRequestOptions::HttpVersion GetHttpVersion() {
  using HttpVersion = IssueRequestOptions::HttpVersion;
  // Thread-safe one-time initialization; the flag is not re-read afterwards.
  static auto http_version = []() -> HttpVersion {
    auto version = GetFlagOrEnvValue(FLAGS_tensorstore_gcs_http_version,
                                     "TENSORSTORE_GCS_HTTP_VERSION");
    if (!version) {
      ABSL_LOG_IF(INFO, gcs_http_logging)
          << "--tensorstore_gcs_http_version unset";
      return HttpVersion::kDefault;
    }
    ABSL_LOG_IF(INFO, gcs_http_logging)
        << "--tensorstore_gcs_http_version=" << *version;
    if (*version == "1" || *version == "1.1") {
      return HttpVersion::kHttp1;
    }
    if (*version == "2" || *version == "2.0") {
      return HttpVersion::kHttp2PriorKnowledge;
    }
    // Any other non-empty value falls back to HTTP/2 over TLS.
    return HttpVersion::kHttp2TLS;
  }();
  return http_version;
}
// Appends "<param_name>=<generation>" to `url` when `gen` carries a concrete
// generation, using '?' or '&' depending on `has_query`.  Returns whether a
// parameter was appended.
bool AddGenerationParam(std::string* url, const bool has_query,
                        std::string_view param_name,
                        const StorageGeneration& gen) {
  if (StorageGeneration::IsUnknown(gen)) return false;
  absl::StrAppend(url, has_query ? "&" : "?", param_name, "=",
                  StorageGeneration::ToUint64(gen));
  return true;
}
// Appends "userProject=<project>" to `url` when a billing project is
// configured, using '?' or '&' depending on `has_query`.  Returns whether a
// parameter was appended.
bool AddUserProjectParam(std::string* url, const bool has_query,
                         std::string_view encoded_user_project) {
  if (encoded_user_project.empty()) return false;
  absl::StrAppend(url, has_query ? "&" : "?",
                  "userProject=", encoded_user_project);
  return true;
}
// Returns the JSON-API resource root for `bucket`:
// "<base>/storage/v1/b/<bucket>".
std::string BucketResourceRoot(std::string_view bucket) {
  static constexpr char kVersion[] = "v1";
  return absl::StrCat(GetGcsBaseUrl(), "/storage/", kVersion, "/b/", bucket);
}
// Returns the JSON-API upload root for `bucket`:
// "<base>/upload/storage/v1/b/<bucket>".
std::string BucketUploadRoot(std::string_view bucket) {
  static constexpr char kVersion[] = "v1";
  return absl::StrCat(GetGcsBaseUrl(), "/upload/storage/", kVersion, "/b/",
                      bucket);
}
// Bound spec data for the "gcs" driver: the bucket name plus the context
// resources controlling concurrency, rate limiting, billing project, and
// retries.
struct GcsKeyValueStoreSpecData {
  std::string bucket;  // GCS bucket; validated by the binder below.
  Context::Resource<GcsConcurrencyResource> request_concurrency;
  std::optional<Context::Resource<GcsRateLimiterResource>> rate_limiter;
  Context::Resource<GcsUserProjectResource> user_project;
  Context::Resource<GcsRequestRetries> retries;
  Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
  // Enables serialization / cache-key computation over all members.
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.bucket, x.request_concurrency, x.rate_limiter, x.user_project,
             x.retries, x.data_copy_concurrency);
  };
  // JSON spec binder; rejects syntactically invalid bucket names up front.
  constexpr static auto default_json_binder = jb::Object(
      jb::Member("bucket",
                 jb::Projection<&GcsKeyValueStoreSpecData::bucket>(jb::Validate(
                     [](const auto& options, const std::string* x) {
                       if (!IsValidBucketName(*x)) {
                         return absl::InvalidArgumentError(absl::StrCat(
                             "Invalid GCS bucket name: ", QuoteString(*x)));
                       }
                       return absl::OkStatus();
                     }))),
      jb::Member(
          GcsConcurrencyResource::id,
          jb::Projection<&GcsKeyValueStoreSpecData::request_concurrency>()),
      jb::Member(GcsRateLimiterResource::id,
                 jb::Projection<&GcsKeyValueStoreSpecData::rate_limiter>()),
      jb::Member(GcsUserProjectResource::id,
                 jb::Projection<&GcsKeyValueStoreSpecData::user_project>()),
      jb::Member(GcsRequestRetries::id,
                 jb::Projection<&GcsKeyValueStoreSpecData::retries>()),
      jb::Member(DataCopyConcurrencyResource::id,
                 jb::Projection<
                     &GcsKeyValueStoreSpecData::data_copy_concurrency>())
  );
};
// Returns the canonical "gs://<bucket>/<percent-encoded path>" URL for an
// object.
// Fix: the scheme-separator literal was truncated at ":" by a
// comment-stripping pass that ate the "//" inside "://"; restored so the URL
// has the standard gs:// form.
std::string GetGcsUrl(std::string_view bucket, std::string_view path) {
  return absl::StrCat(kUriScheme, "://", bucket, "/",
                      internal::PercentEncodeUriPath(path));
}
// Registered driver spec for the "gcs" kvstore scheme.
class GcsKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<GcsKeyValueStoreSpec,
                                                    GcsKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "gcs";

  // Rejects any non-empty path that is not a valid GCS object name.
  absl::Status NormalizeSpec(std::string& path) override {
    if (path.empty() || IsValidObjectName(path)) {
      return absl::OkStatus();
    }
    return absl::InvalidArgumentError(
        absl::StrCat("Invalid GCS path: ", QuoteString(path)));
  }

  // Defined after GcsKeyValueStore below.
  Future<kvstore::DriverPtr> DoOpen() const override;

  // Produces the canonical "gs://bucket/path" URL for this spec.
  Result<std::string> ToUrl(std::string_view path) const override {
    return GetGcsUrl(data_.bucket, path);
  }
};
// The GCS kvstore driver.  Holds the bound spec, the HTTP transport, a
// lazily-created auth provider, and accessors for the configured rate
// limiters / executor.
class GcsKeyValueStore
    : public internal_kvstore::RegisteredDriver<GcsKeyValueStore,
                                                GcsKeyValueStoreSpec> {
 public:
  // JSON-API resource root for the bucket ("<base>/storage/v1/b/<bucket>").
  const std::string& resource_root() const { return resource_root_; }
  // JSON-API upload root for the bucket.
  const std::string& upload_root() const { return upload_root_; }
  // Percent-encoded billing project, or empty when none is configured.
  const std::string& encoded_user_project() const {
    return encoded_user_project_;
  }
  // Coalescing policy used when batching adjacent byte-range reads.
  internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
      const {
    return internal_kvstore_batch::kDefaultRemoteStorageCoalescingOptions;
  }
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;
  Future<const void> DeleteRange(KeyRange range) override;
  // Returns the "Authorization" header to attach to requests, std::nullopt
  // for anonymous access.  The auth provider is created on first use; a
  // NotFound from provider creation or header retrieval is treated as "no
  // credentials available" (anonymous), not as an error.
  Result<std::optional<std::string>> GetAuthHeader() {
    absl::MutexLock lock(&auth_provider_mutex_);
    if (!auth_provider_) {
      auto result = tensorstore::internal_oauth2::GetSharedGoogleAuthProvider();
      if (!result.ok() && absl::IsNotFound(result.status())) {
        // Cache the negative result so we don't retry provider creation.
        auth_provider_ = nullptr;
      } else {
        TENSORSTORE_RETURN_IF_ERROR(result);
        auth_provider_ = *std::move(result);
      }
    }
    if (!*auth_provider_) return std::nullopt;
    auto auth_header_result = (*auth_provider_)->GetAuthHeader();
    if (!auth_header_result.ok() &&
        absl::IsNotFound(auth_header_result.status())) {
      return std::nullopt;
    }
    return auth_header_result;
  }
  // Executor used for driver callbacks.
  const Executor& executor() const {
    return spec_.data_copy_concurrency->executor;
  }
  // Rate limiters fall back to a no-op limiter when the experimental
  // rate-limiter resource is not configured.
  RateLimiter& read_rate_limiter() {
    if (spec_.rate_limiter.has_value()) {
      return *(spec_.rate_limiter.value()->read_limiter);
    }
    return no_rate_limiter_;
  }
  RateLimiter& write_rate_limiter() {
    if (spec_.rate_limiter.has_value()) {
      return *(spec_.rate_limiter.value()->write_limiter);
    }
    return no_rate_limiter_;
  }
  // Limits the number of concurrently admitted requests.
  RateLimiter& admission_queue() { return *spec_.request_concurrency->queue; }
  absl::Status GetBoundSpecData(SpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }
  std::string DescribeKey(std::string_view key) override {
    return GetGcsUrl(spec_.bucket, key);
  }
  SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
           SupportedFeatures::kAtomicWriteWithoutOverwrite;
  }
  // Schedules `task->Retry()` after the backoff delay for `attempt`.
  // Returns OkStatus when a retry was scheduled, or `status` annotated with
  // kAborted once the configured retry budget is exhausted.
  template <typename Task>
  absl::Status BackoffForAttemptAsync(
      absl::Status status, int attempt, Task* task,
      SourceLocation loc = ::tensorstore::SourceLocation::current()) {
    assert(task != nullptr);
    auto delay = spec_.retries->BackoffForAttempt(attempt);
    if (!delay) {
      return MaybeAnnotateStatus(std::move(status),
                                 absl::StrFormat("All %d retry attempts failed",
                                                 spec_.retries->max_retries),
                                 absl::StatusCode::kAborted, loc);
    }
    gcs_metrics.retries.Increment();
    // Hold a reference to the task until the retry fires.
    ScheduleAt(absl::Now() + *delay,
               WithExecutor(executor(), [task = IntrusivePtr<Task>(task)] {
                 task->Retry();
               }));
    return absl::OkStatus();
  }
  SpecData spec_;
  std::string resource_root_;         // Resource root for the bucket.
  std::string upload_root_;           // Upload root for the bucket.
  std::string encoded_user_project_;  // Percent-encoded billing project.
  NoRateLimiter no_rate_limiter_;
  std::shared_ptr<HttpTransport> transport_;
  absl::Mutex auth_provider_mutex_;
  // Unset until first use; nullptr means "no credentials" (anonymous).
  std::optional<std::shared_ptr<internal_oauth2::AuthProvider>> auth_provider_;
};
// Constructs the driver from the bound spec: precomputes the bucket's
// resource/upload roots, grabs the default HTTP transport, and
// percent-encodes the billing project if one is configured.
Future<kvstore::DriverPtr> GcsKeyValueStoreSpec::DoOpen() const {
  auto driver = internal::MakeIntrusivePtr<GcsKeyValueStore>();
  driver->spec_ = data_;
  driver->resource_root_ = BucketResourceRoot(data_.bucket);
  driver->upload_root_ = BucketUploadRoot(data_.bucket);
  driver->transport_ = internal_http::GetDefaultHttpTransport();
  if (data_.rate_limiter.has_value()) {
    ABSL_LOG_IF(INFO, gcs_http_logging)
        << "Using experimental_gcs_rate_limiter";
  }
  if (const auto& project_id = data_.user_project->project_id) {
    driver->encoded_user_project_ =
        internal::PercentEncodeUriComponent(*project_id);
  }
  return driver;
}
// Appends a random 128-bit "tensorstore=<hex>" query parameter so that
// caching proxies cannot serve a stale response.  Always appends with '&',
// so `url` must already contain a query string.
void AddUniqueQueryParameterToDisableCaching(std::string& url) {
  struct RandomState {
    absl::Mutex mutex;
    absl::BitGen gen ABSL_GUARDED_BY(mutex);
  };
  static RandomState random_state;
  uint64_t hi;
  uint64_t lo;
  {
    // The shared generator is guarded; draw both halves under one lock.
    absl::MutexLock lock(&random_state.mutex);
    hi = absl::Uniform<uint64_t>(random_state.gen);
    lo = absl::Uniform<uint64_t>(random_state.gen);
  }
  absl::StrAppend(&url, "&tensorstore=", absl::Hex(hi, absl::kZeroPad16),
                  absl::Hex(lo, absl::kZeroPad16));
}
/// State machine for a single GCS object read (or metadata-only stat) with
/// rate limiting, admission control, and retry-with-backoff.
///
/// Lifecycle: the creator bumps the refcount and hands a raw pointer to the
/// read rate limiter; Admit() adopts that reference into an IntrusivePtr, and
/// the destructor notifies the admission queue.
struct ReadTask : public RateLimiterNode,
                  public internal::AtomicReferenceCount<ReadTask> {
  IntrusivePtr<GcsKeyValueStore> owner;
  // Pre-encoded object resource URL (no query string yet).
  std::string resource;
  kvstore::ReadOptions options;
  Promise<kvstore::ReadResult> promise;
  int attempt_ = 0;        // Retry attempt counter used for backoff.
  absl::Time start_time_;  // When the most recent HTTP request was issued.
  ReadTask(IntrusivePtr<GcsKeyValueStore> owner, std::string resource,
           kvstore::ReadOptions options, Promise<kvstore::ReadResult> promise)
      : owner(std::move(owner)),
        resource(std::move(resource)),
        options(std::move(options)),
        promise(std::move(promise)) {}
  ~ReadTask() { owner->admission_queue().Finish(this); }
  // RateLimiterNode callback: the read rate limiter released this task;
  // forward it to the admission queue.
  static void Start(void* task) {
    auto* self = reinterpret_cast<ReadTask*>(task);
    self->owner->read_rate_limiter().Finish(self);
    self->owner->admission_queue().Admit(self, &ReadTask::Admit);
  }
  // Admission-queue callback: adopt the caller-held reference and issue the
  // request on the owner's executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<ReadTask*>(task);
    self->owner->executor()(
        [state = IntrusivePtr<ReadTask>(self, internal::adopt_object_ref)] {
          state->Retry();
        });
  }
  // Issues (or re-issues, on retry) the HTTP request for this read.
  void Retry() {
    if (!promise.result_needed()) {
      return;
    }
    // A zero-size byte range means only metadata is wanted (?alt=json);
    // otherwise request the object content (?alt=media).
    std::string media_url = absl::StrCat(
        resource, options.byte_range.size() == 0 ? "?alt=json" : "?alt=media");
    // Map generation preconditions onto GCS query parameters.
    AddGenerationParam(&media_url, true, "ifGenerationNotMatch",
                       options.generation_conditions.if_not_equal);
    AddGenerationParam(&media_url, true, "ifGenerationMatch",
                       options.generation_conditions.if_equal);
    AddUserProjectParam(&media_url, true, owner->encoded_user_project());
    AddUniqueQueryParameterToDisableCaching(media_url);
    auto maybe_auth_header = owner->GetAuthHeader();
    if (!maybe_auth_header.ok()) {
      promise.SetResult(maybe_auth_header.status());
      return;
    }
    HttpRequestBuilder request_builder("GET", media_url);
    if (maybe_auth_header.value().has_value()) {
      request_builder.AddHeader(*maybe_auth_header.value());
    }
    if (options.byte_range.size() != 0) {
      request_builder.MaybeAddRangeHeader(options.byte_range);
    }
    auto request = request_builder.EnableAcceptEncoding().BuildRequest();
    start_time_ = absl::Now();
    ABSL_LOG_IF(INFO, gcs_http_logging) << "ReadTask: " << request;
    auto future = owner->transport_->IssueRequest(
        request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
    future.ExecuteWhenReady([self = IntrusivePtr<ReadTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  // Classifies the HTTP outcome, schedules a retry for retryable failures,
  // and otherwise resolves the promise.
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
        << "ReadTask " << *response;
    bool is_retryable = IsRetriable(response.status());
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) return response.status();
      switch (response.value().status_code) {
        // These codes carry read-result semantics (precondition failed,
        // missing object, not modified); FinishResponse maps them.
        case 412:
        case 404:
        case 304:
          return absl::OkStatus();
      }
      return GcsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      // BackoffForAttemptAsync returns OkStatus when a retry was scheduled;
      // the scheduled Retry() then owns the continuation.
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
    } else {
      promise.SetResult(FinishResponse(response.value()));
    }
  }
  // Converts a terminal HTTP response into a kvstore::ReadResult.
  Result<kvstore::ReadResult> FinishResponse(const HttpResponse& httpresponse) {
    gcs_metrics.bytes_read.IncrementBy(httpresponse.payload.size());
    auto latency = absl::Now() - start_time_;
    gcs_metrics.read_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
    switch (httpresponse.status_code) {
      case 204:
      case 404:
        // Object does not exist.
        return kvstore::ReadResult::Missing(start_time_);
      case 412:
        // ifGenerationMatch precondition failed.
        return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
            StorageGeneration::Unknown(), start_time_});
      case 304:
        // Not modified: the generation still matches if_not_equal.
        return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
            options.generation_conditions.if_not_equal, start_time_});
    }
    absl::Cord value;
    ObjectMetadata metadata;
    if (options.byte_range.size() != 0) {
      // Content request: validate the returned byte range and take object
      // metadata from the response headers.
      ByteRange byte_range;
      int64_t total_size;
      TENSORSTORE_RETURN_IF_ERROR(internal_http::ValidateResponseByteRange(
          httpresponse, options.byte_range, value, byte_range, total_size));
      SetObjectMetadataFromHeaders(httpresponse.headers, &metadata);
    } else {
      // Metadata-only request (?alt=json): payload is the object's JSON
      // metadata; `value` stays empty.
      absl::Cord cord = httpresponse.payload;
      TENSORSTORE_ASSIGN_OR_RETURN(metadata,
                                   ParseObjectMetadata(cord.Flatten()));
    }
    auto generation = StorageGeneration::FromUint64(metadata.generation);
    return kvstore::ReadResult::Value(
        std::move(value),
        TimestampedStorageGeneration{std::move(generation), start_time_});
  }
};
/// Entry point for reads: validates the key and generation conditions, then
/// hands the request to the generic byte-range coalescing batch machinery
/// (which ultimately calls ReadImpl).
Future<kvstore::ReadResult> GcsKeyValueStore::Read(Key key,
                                                   ReadOptions options) {
  gcs_metrics.read.Increment();
  // Reject keys that are not valid GCS object names up front.
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid GCS object name");
  }
  // Both conditional generations must be well-formed before any request is
  // issued.
  const auto& conds = options.generation_conditions;
  const bool generations_ok = IsValidStorageGeneration(conds.if_equal) &&
                              IsValidStorageGeneration(conds.if_not_equal);
  if (!generations_ok) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
      *this, std::move(key), std::move(options));
}
/// Issues a single (possibly batch-coalesced) read by creating a ReadTask
/// and submitting it to the read rate limiter; the task resolves the
/// returned future.
Future<kvstore::ReadResult> GcsKeyValueStore::ReadImpl(Key&& key,
                                                       ReadOptions&& options) {
  gcs_metrics.batch_read.Increment();
  // Build the fully-encoded resource path for the object.
  auto object_name = internal::PercentEncodeUriComponent(key);
  std::string resource =
      tensorstore::internal::JoinPath(resource_root_, "/o/", object_name);
  auto [promise, future] = PromiseFuturePair<ReadResult>::Make();
  auto task = internal::MakeIntrusivePtr<ReadTask>(
      internal::IntrusivePtr<GcsKeyValueStore>(this), std::move(resource),
      std::move(options), std::move(promise));
  // The rate limiter holds a raw pointer; give it an extra reference that
  // ReadTask::Admit later adopts.
  intrusive_ptr_increment(task.get());
  read_rate_limiter().Admit(task.get(), &ReadTask::Start);
  return std::move(future);
}
/// State machine for a single conditional GCS object upload, with rate
/// limiting, admission control, and retry-with-backoff.
///
/// Lifecycle mirrors ReadTask: the creator hands an extra reference to the
/// write rate limiter, Admit() adopts it, and the destructor notifies the
/// admission queue.
struct WriteTask : public RateLimiterNode,
                   public internal::AtomicReferenceCount<WriteTask> {
  IntrusivePtr<GcsKeyValueStore> owner;
  std::string encoded_object_name;  // Percent-encoded object name.
  absl::Cord value;                 // Payload to upload.
  kvstore::WriteOptions options;
  Promise<TimestampedStorageGeneration> promise;
  int attempt_ = 0;        // Retry attempt counter used for backoff.
  absl::Time start_time_;  // When the most recent HTTP request was issued.
  WriteTask(IntrusivePtr<GcsKeyValueStore> owner,
            std::string encoded_object_name, absl::Cord value,
            kvstore::WriteOptions options,
            Promise<TimestampedStorageGeneration> promise)
      : owner(std::move(owner)),
        encoded_object_name(std::move(encoded_object_name)),
        value(std::move(value)),
        options(std::move(options)),
        promise(std::move(promise)) {}
  ~WriteTask() { owner->admission_queue().Finish(this); }
  // RateLimiterNode callback: forward from rate limiter to admission queue.
  static void Start(void* task) {
    auto* self = reinterpret_cast<WriteTask*>(task);
    self->owner->write_rate_limiter().Finish(self);
    self->owner->admission_queue().Admit(self, &WriteTask::Admit);
  }
  // Admission-queue callback: adopt the caller-held reference and run on the
  // owner's executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<WriteTask*>(task);
    self->owner->executor()(
        [state = IntrusivePtr<WriteTask>(self, internal::adopt_object_ref)] {
          state->Retry();
        });
  }
  // Issues (or re-issues, on retry) the upload request.
  void Retry() {
    if (!promise.result_needed()) {
      return;
    }
    // Simple (non-resumable) media upload with a conditional generation.
    std::string upload_url =
        absl::StrCat(owner->upload_root(), "/o", "?uploadType=media",
                     "&name=", encoded_object_name);
    AddGenerationParam(&upload_url, true, "ifGenerationMatch",
                       options.generation_conditions.if_equal);
    AddUserProjectParam(&upload_url, true, owner->encoded_user_project());
    auto maybe_auth_header = owner->GetAuthHeader();
    if (!maybe_auth_header.ok()) {
      promise.SetResult(maybe_auth_header.status());
      return;
    }
    HttpRequestBuilder request_builder("POST", upload_url);
    if (maybe_auth_header.value().has_value()) {
      request_builder.AddHeader(*maybe_auth_header.value());
    }
    auto request =
        request_builder.AddHeader("Content-Type: application/octet-stream")
            .AddHeader(absl::StrCat("Content-Length: ", value.size()))
            .BuildRequest();
    start_time_ = absl::Now();
    ABSL_LOG_IF(INFO, gcs_http_logging)
        << "WriteTask: " << request << " size=" << value.size();
    auto future = owner->transport_->IssueRequest(
        request, IssueRequestOptions(value).SetHttpVersion(GetHttpVersion()));
    future.ExecuteWhenReady([self = IntrusivePtr<WriteTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  // Classifies the HTTP outcome, schedules a retry for retryable failures,
  // and otherwise resolves the promise.
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
        << "WriteTask " << *response;
    bool is_retryable = IsRetriable(response.status());
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) return response.status();
      switch (response.value().status_code) {
        // 304/412 indicate a failed precondition; FinishResponse maps them
        // to StorageGeneration::Unknown.
        case 304:
          [[fallthrough]];
        case 412:
          return absl::OkStatus();
        case 404:
          // 404 is only a terminal outcome when a generation precondition
          // was supplied; otherwise treat it as an error.
          if (!options.generation_conditions.MatchesNoValue()) {
            return absl::OkStatus();
          }
          break;
        default:
          break;
      }
      return GcsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      // OkStatus means a retry was scheduled; the scheduled Retry() owns the
      // continuation.
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
    } else {
      promise.SetResult(FinishResponse(response.value()));
    }
  }
  // Converts a terminal HTTP response into a TimestampedStorageGeneration.
  Result<TimestampedStorageGeneration> FinishResponse(
      const HttpResponse& httpresponse) {
    TimestampedStorageGeneration r;
    r.time = start_time_;
    switch (httpresponse.status_code) {
      case 304:
        [[fallthrough]];
      case 412:
        // Precondition failed: generation unknown.
        r.generation = StorageGeneration::Unknown();
        return r;
      case 404:
        if (!StorageGeneration::IsUnknown(
                options.generation_conditions.if_equal)) {
          r.generation = StorageGeneration::Unknown();
          return r;
        }
    }
    auto latency = absl::Now() - start_time_;
    gcs_metrics.write_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
    gcs_metrics.bytes_written.IncrementBy(value.size());
    // The success payload is the new object's JSON metadata; its generation
    // becomes the result generation.
    auto payload = httpresponse.payload;
    auto parsed_object_metadata = ParseObjectMetadata(payload.Flatten());
    TENSORSTORE_RETURN_IF_ERROR(parsed_object_metadata);
    r.generation =
        StorageGeneration::FromUint64(parsed_object_metadata->generation);
    return r;
  }
};
/// State machine for a single conditional GCS object delete, with rate
/// limiting, admission control, and retry-with-backoff.
///
/// Lifecycle mirrors WriteTask: an extra reference is handed to the write
/// rate limiter, Admit() adopts it, and the destructor notifies the
/// admission queue.
struct DeleteTask : public RateLimiterNode,
                    public internal::AtomicReferenceCount<DeleteTask> {
  IntrusivePtr<GcsKeyValueStore> owner;
  std::string resource;  // Pre-encoded object resource URL.
  kvstore::WriteOptions options;
  Promise<TimestampedStorageGeneration> promise;
  int attempt_ = 0;        // Retry attempt counter used for backoff.
  absl::Time start_time_;  // When the most recent HTTP request was issued.
  DeleteTask(IntrusivePtr<GcsKeyValueStore> owner, std::string resource,
             kvstore::WriteOptions options,
             Promise<TimestampedStorageGeneration> promise)
      : owner(std::move(owner)),
        resource(std::move(resource)),
        options(std::move(options)),
        promise(std::move(promise)) {}
  ~DeleteTask() { owner->admission_queue().Finish(this); }
  // RateLimiterNode callback: forward from rate limiter to admission queue.
  static void Start(void* task) {
    auto* self = reinterpret_cast<DeleteTask*>(task);
    self->owner->write_rate_limiter().Finish(self);
    self->owner->admission_queue().Admit(self, &DeleteTask::Admit);
  }
  // Admission-queue callback: adopt the caller-held reference and run on the
  // owner's executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<DeleteTask*>(task);
    self->owner->executor()(
        [state = IntrusivePtr<DeleteTask>(self, internal::adopt_object_ref)] {
          state->Retry();
        });
  }
  // Issues (or re-issues, on retry) the DELETE request.
  void Retry() {
    if (!promise.result_needed()) {
      return;
    }
    std::string delete_url = resource;
    // Unlike read/write URLs, no query string exists yet, so track whether
    // the first parameter was added.
    bool has_query = AddGenerationParam(&delete_url, false, "ifGenerationMatch",
                                        options.generation_conditions.if_equal);
    AddUserProjectParam(&delete_url, has_query, owner->encoded_user_project());
    auto maybe_auth_header = owner->GetAuthHeader();
    if (!maybe_auth_header.ok()) {
      promise.SetResult(maybe_auth_header.status());
      return;
    }
    HttpRequestBuilder request_builder("DELETE", delete_url);
    if (maybe_auth_header.value().has_value()) {
      request_builder.AddHeader(*maybe_auth_header.value());
    }
    auto request = request_builder.BuildRequest();
    start_time_ = absl::Now();
    ABSL_LOG_IF(INFO, gcs_http_logging) << "DeleteTask: " << request;
    auto future = owner->transport_->IssueRequest(
        request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
    future.ExecuteWhenReady([self = IntrusivePtr<DeleteTask>(this)](
                                ReadyFuture<HttpResponse> response) {
      self->OnResponse(response.result());
    });
  }
  // Classifies the HTTP outcome, schedules a retry for retryable failures,
  // and otherwise resolves the promise.
  void OnResponse(const Result<HttpResponse>& response) {
    if (!promise.result_needed()) {
      return;
    }
    ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
        << "DeleteTask " << *response;
    bool is_retryable = IsRetriable(response.status());
    absl::Status status = [&]() -> absl::Status {
      if (!response.ok()) return response.status();
      switch (response.value().status_code) {
        // 412 (precondition failed) and 404 (already absent) carry delete
        // semantics and are mapped below.
        case 412:
          [[fallthrough]];
        case 404:
          return absl::OkStatus();
        default:
          break;
      }
      return GcsHttpResponseToStatus(response.value(), is_retryable);
    }();
    if (!status.ok() && is_retryable) {
      // OkStatus means a retry was scheduled; the scheduled Retry() owns the
      // continuation.
      status =
          owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    if (!status.ok()) {
      promise.SetResult(status);
      return;
    }
    TimestampedStorageGeneration r;
    r.time = start_time_;
    switch (response.value().status_code) {
      case 412:
        // Precondition failed: generation unknown.
        r.generation = StorageGeneration::Unknown();
        break;
      case 404:
        if (!options.generation_conditions.MatchesNoValue()) {
          r.generation = StorageGeneration::Unknown();
          break;
        }
        [[fallthrough]];
      default:
        // Object is (now) absent.
        r.generation = StorageGeneration::NoValue();
        break;
    }
    promise.SetResult(std::move(r));
  }
};
/// Writes an object (value present) or deletes it (value == nullopt).
/// Validation happens here; the HTTP work is delegated to WriteTask /
/// DeleteTask, admitted through the write rate limiter.
Future<TimestampedStorageGeneration> GcsKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  gcs_metrics.write.Increment();
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid GCS object name");
  }
  if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  std::string object_name = internal::PercentEncodeUriComponent(key);
  auto [promise, future] =
      PromiseFuturePair<TimestampedStorageGeneration>::Make();
  if (value.has_value()) {
    // Upload path.
    auto task = internal::MakeIntrusivePtr<WriteTask>(
        IntrusivePtr<GcsKeyValueStore>(this), std::move(object_name),
        *std::move(value), std::move(options), std::move(promise));
    intrusive_ptr_increment(task.get());  // Adopted in WriteTask::Admit.
    write_rate_limiter().Admit(task.get(), &WriteTask::Start);
  } else {
    // Delete path.
    std::string resource =
        tensorstore::internal::JoinPath(resource_root_, "/o/", object_name);
    auto task = internal::MakeIntrusivePtr<DeleteTask>(
        IntrusivePtr<GcsKeyValueStore>(this), std::move(resource),
        std::move(options), std::move(promise));
    intrusive_ptr_increment(task.get());  // Adopted in DeleteTask::Admit.
    write_rate_limiter().Admit(task.get(), &DeleteTask::Start);
  }
  return std::move(future);
}
// Subset of a GCS Objects.list JSON response consumed by ListTask.
struct GcsListResponsePayload {
  std::string next_page_token;  // Empty on the final page.
  std::vector<ObjectMetadata> items;
};
// JSON binder for GcsListResponsePayload: both fields default to empty when
// absent, and any other members of the response are ignored.
constexpr static auto GcsListResponsePayloadBinder = jb::Object(
    jb::Member("nextPageToken",
               jb::Projection(&GcsListResponsePayload::next_page_token,
                              jb::DefaultInitializedValue())),
    jb::Member("items", jb::Projection(&GcsListResponsePayload::items,
                                       jb::DefaultInitializedValue())),
    jb::DiscardExtraMembers);
/// State machine for paginated listing of a GCS bucket, with rate limiting,
/// admission control, retry-with-backoff, and cooperative cancellation.
struct ListTask : public RateLimiterNode,
                  public internal::AtomicReferenceCount<ListTask> {
  internal::IntrusivePtr<GcsKeyValueStore> owner_;
  ListOptions options_;
  ListReceiver receiver_;
  std::string resource_;       // Base ".../o" resource URL.
  std::string base_list_url_;  // resource_ plus page-invariant parameters.
  std::string next_page_token_;
  int attempt_ = 0;  // Retry counter; reset after each successful page.
  bool has_query_parameters_;
  std::atomic<bool> cancelled_{false};
  ListTask(internal::IntrusivePtr<GcsKeyValueStore>&& owner,
           ListOptions&& options, ListReceiver&& receiver,
           std::string&& resource)
      : owner_(std::move(owner)),
        options_(std::move(options)),
        receiver_(std::move(receiver)),
        resource_(std::move(resource)) {
    // Precompute the query parameters that are identical for every page:
    // userProject plus the startOffset/endOffset key-range bounds.
    base_list_url_ = resource_;
    has_query_parameters_ = AddUserProjectParam(&base_list_url_, false,
                                                owner_->encoded_user_project());
    if (auto& inclusive_min = options_.range.inclusive_min;
        !inclusive_min.empty()) {
      absl::StrAppend(
          &base_list_url_, (has_query_parameters_ ? "&" : "?"),
          "startOffset=", internal::PercentEncodeUriComponent(inclusive_min));
      has_query_parameters_ = true;
    }
    if (auto& exclusive_max = options_.range.exclusive_max;
        !exclusive_max.empty()) {
      absl::StrAppend(
          &base_list_url_, (has_query_parameters_ ? "&" : "?"),
          "endOffset=", internal::PercentEncodeUriComponent(exclusive_max));
      has_query_parameters_ = true;
    }
  }
  ~ListTask() { owner_->admission_queue().Finish(this); }
  inline bool is_cancelled() {
    return cancelled_.load(std::memory_order_relaxed);
  }
  // RateLimiterNode callback: forward from rate limiter to admission queue.
  static void Start(void* task) {
    auto* self = reinterpret_cast<ListTask*>(task);
    self->owner_->read_rate_limiter().Finish(self);
    self->owner_->admission_queue().Admit(self, &ListTask::Admit);
  }
  // Admission-queue callback: install the cancellation hook, adopt the
  // caller-held reference, and issue the first request on the executor.
  static void Admit(void* task) {
    auto* self = reinterpret_cast<ListTask*>(task);
    execution::set_starting(self->receiver_, [self] {
      self->cancelled_.store(true, std::memory_order_relaxed);
    });
    self->owner_->executor()(
        [state = IntrusivePtr<ListTask>(self, internal::adopt_object_ref)] {
          state->IssueRequest();
        });
  }
  void Retry() { IssueRequest(); }
  // Issues the request for the current page (identified by
  // next_page_token_).
  void IssueRequest() {
    if (is_cancelled()) {
      execution::set_done(receiver_);
      execution::set_stopping(receiver_);
      return;
    }
    std::string list_url = base_list_url_;
    if (!next_page_token_.empty()) {
      absl::StrAppend(&list_url, (has_query_parameters_ ? "&" : "?"),
                      "pageToken=", next_page_token_);
    }
    auto auth_header = owner_->GetAuthHeader();
    if (!auth_header.ok()) {
      execution::set_error(receiver_, std::move(auth_header).status());
      execution::set_stopping(receiver_);
      return;
    }
    HttpRequestBuilder request_builder("GET", list_url);
    if (auth_header->has_value()) {
      request_builder.AddHeader(auth_header->value());
    }
    auto request = request_builder.BuildRequest();
    ABSL_LOG_IF(INFO, gcs_http_logging) << "List: " << request;
    auto future = owner_->transport_->IssueRequest(
        request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
    future.ExecuteWhenReady(WithExecutor(
        owner_->executor(), [self = IntrusivePtr<ListTask>(this)](
                                ReadyFuture<HttpResponse> response) {
          self->OnResponse(response.result());
        }));
  }
  // Translates OnResponseImpl's status into receiver protocol signals.
  void OnResponse(const Result<HttpResponse>& response) {
    auto status = OnResponseImpl(response);
    if (absl::IsCancelled(status)) {
      execution::set_done(receiver_);
      execution::set_stopping(receiver_);
      return;
    }
    if (!status.ok()) {
      execution::set_error(receiver_, std::move(status));
      execution::set_stopping(receiver_);
      return;
    }
  }
  // Handles one page: emits entries to the receiver, then either requests
  // the next page or completes the stream.  Returns CancelledError on
  // cancellation, OkStatus when the stream was completed or a retry/next
  // page was scheduled, and any other status to report to the receiver.
  absl::Status OnResponseImpl(const Result<HttpResponse>& response) {
    if (is_cancelled()) {
      return absl::CancelledError();
    }
    ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
        << "List " << *response;
    bool is_retryable = IsRetriable(response.status());
    absl::Status status =
        response.ok() ? GcsHttpResponseToStatus(response.value(), is_retryable)
                      : response.status();
    if (!status.ok() && is_retryable) {
      // OkStatus here means a retry was scheduled (which re-enters
      // IssueRequest via Retry); anything else is a terminal error.
      return owner_->BackoffForAttemptAsync(std::move(status), attempt_++,
                                            this);
    }
    // FIX: propagate non-retryable failures.  Previously control fell
    // through to `response->payload`, which is undefined behavior when
    // !response.ok(), and otherwise attempted to parse an error body as a
    // listing, masking the real error with an InternalError.
    if (!status.ok()) {
      return status;
    }
    auto payload = response->payload;
    auto j = internal::ParseJson(payload.Flatten());
    if (j.is_discarded()) {
      return absl::InternalError(absl::StrCat(
          "Failed to parse response metadata: ", payload.Flatten()));
    }
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto parsed_payload,
        jb::FromJson<GcsListResponsePayload>(j, GcsListResponsePayloadBinder));
    for (auto& metadata : parsed_payload.items) {
      if (is_cancelled()) {
        return absl::CancelledError();
      }
      // Optionally strip the caller-requested prefix from emitted keys.
      std::string_view name = metadata.name;
      if (options_.strip_prefix_length) {
        name = name.substr(options_.strip_prefix_length);
      }
      execution::set_value(receiver_,
                           ListEntry{
                               std::string(name),
                               ListEntry::checked_size(metadata.size),
                           });
    }
    attempt_ = 0;
    next_page_token_ = std::move(parsed_payload.next_page_token);
    if (!next_page_token_.empty()) {
      IssueRequest();
    } else {
      execution::set_done(receiver_);
      execution::set_stopping(receiver_);
    }
    return absl::OkStatus();
  }
};
/// Starts a List operation.  An empty key range is resolved immediately
/// without any HTTP traffic; otherwise a ListTask is admitted through the
/// read rate limiter.
void GcsKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
  gcs_metrics.list.Increment();
  if (options.range.empty()) {
    // Nothing can match: signal an empty, already-complete stream.
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  auto task = internal::MakeIntrusivePtr<ListTask>(
      IntrusivePtr<GcsKeyValueStore>(this), std::move(options),
      std::move(receiver),
      tensorstore::internal::JoinPath(resource_root_, "/o"));
  // Extra reference for the rate limiter; adopted in ListTask::Admit.
  intrusive_ptr_increment(task.get());
  read_rate_limiter().Admit(task.get(), &ListTask::Start);
}
/// List receiver used by DeleteRange: issues an individual Delete for every
/// listed key and links each delete's status into the shared promise.
struct DeleteRangeListReceiver {
  IntrusivePtr<GcsKeyValueStore> owner_;
  Promise<void> promise_;
  FutureCallbackRegistration cancel_registration_;
  // Cancels the underlying List when the DeleteRange future is abandoned.
  void set_starting(AnyCancelReceiver cancel) {
    cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
  }
  void set_value(ListEntry entry) {
    assert(!entry.key.empty());
    if (!entry.key.empty()) {
      // Any delete failure is propagated to the promise via LinkError.
      LinkError(promise_, owner_->Delete(std::move(entry.key)));
    }
  }
  void set_error(absl::Status error) {
    SetDeferredResult(promise_, std::move(error));
    promise_ = Promise<void>();  // Release the promise reference.
  }
  // Releasing the promise lets it resolve once all linked deletes finish.
  void set_done() { promise_ = Promise<void>(); }
  void set_stopping() { cancel_registration_.Unregister(); }
};
/// Deletes every key in `range` by listing the range and issuing an
/// individual delete per listed entry (GCS has no native range delete).
Future<const void> GcsKeyValueStore::DeleteRange(KeyRange range) {
  gcs_metrics.delete_range.Increment();
  if (range.empty()) return absl::OkStatus();
  auto [promise, future] =
      PromiseFuturePair<void>::Make(tensorstore::MakeResult());
  ListOptions list_options;
  list_options.range = std::move(range);
  ListImpl(list_options, DeleteRangeListReceiver{
                             internal::IntrusivePtr<GcsKeyValueStore>(this),
                             std::move(promise)});
  return std::move(future);
}
/// Parses a "gs://bucket/path" URL into a kvstore::Spec.
///
/// Rejects query strings and fragments, validates the bucket name, and
/// percent-decodes the object path.
Result<kvstore::Spec> ParseGcsUrl(std::string_view url) {
  auto uri = internal::ParseGenericUri(url);
  assert(uri.scheme == kUriScheme);
  if (!uri.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!uri.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  if (!IsValidBucketName(uri.authority)) {
    return absl::InvalidArgumentError(
        absl::StrCat("Invalid GCS bucket name: ", QuoteString(uri.authority)));
  }
  // Strip the leading '/' before percent-decoding the object path.
  std::string path;
  if (!uri.path.empty()) {
    path = internal::PercentDecode(uri.path.substr(1));
  }
  auto spec = internal::MakeIntrusivePtr<GcsKeyValueStoreSpec>();
  spec->data_.bucket = std::string(uri.authority);
  spec->data_.request_concurrency =
      Context::Resource<GcsConcurrencyResource>::DefaultSpec();
  spec->data_.user_project =
      Context::Resource<GcsUserProjectResource>::DefaultSpec();
  spec->data_.retries = Context::Resource<GcsRequestRetries>::DefaultSpec();
  spec->data_.data_copy_concurrency =
      Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
  return {std::in_place, std::move(spec), std::move(path)};
}
}
}
// GcsKeyValueStore holds no members that require garbage-collection support.
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
    tensorstore::GcsKeyValueStore)
namespace {
// Static registrations: the "gcs" kvstore driver and its URL scheme handler.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::GcsKeyValueStoreSpec>
    registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
    url_scheme_registration{kUriScheme, tensorstore::ParseGcsUrl};
} | #include <stddef.h>
#include <algorithm>
#include <atomic>
#include <memory>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/oauth2/google_auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_test_utils.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/gcs_http/gcs_mock.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::Context;
using ::tensorstore::Future;
using ::tensorstore::GCSMockStorageBucket;
using ::tensorstore::KeyRange;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
using ::tensorstore::internal_oauth2::GoogleAuthTestScope;
static constexpr char kDriver[] = "gcs";
// Serves mock responses for the GCE metadata server so the Google auth
// provider can obtain a fake OAuth token during tests.
class MetadataMockHelper {
 public:
  tensorstore::Result<HttpResponse> GetResponse(const HttpRequest& request) {
    auto parsed = tensorstore::internal::ParseGenericUri(request.url);
    // Only handle metadata-server requests; anything else falls through to
    // the GCS bucket mocks.
    if (!absl::StartsWith(parsed.authority_and_path,
                          "metadata.google.internal/")) {
      return absl::UnimplementedError("Mock cannot satisfy the request.");
    }
    // Token endpoint: return a fake access token.
    constexpr char kOAuthPath[] =
        "metadata.google.internal/computeMetadata/v1/"
        "instance/service-accounts/user@nowhere.com/token";
    if (absl::StartsWith(parsed.authority_and_path, kOAuthPath)) {
      return HttpResponse{
          200,
          absl::Cord(
              R"({ "token_type" : "refresh", "access_token": "abc", "expires_in": 3600 })")};
    }
    // Service-account discovery endpoint.
    constexpr char kServiceAccountPath[] =
        "metadata.google.internal/computeMetadata/v1/"
        "instance/service-accounts/default/";
    if (absl::StartsWith(parsed.authority_and_path, kServiceAccountPath)) {
      return HttpResponse{
          200, absl::Cord(
                   R"({ "email": "user@nowhere.com", "scopes": [ "test" ] })")};
    }
    // Any other metadata request succeeds with an empty body.
    return HttpResponse{200, absl::Cord()};
  }
  GoogleAuthTestScope google_auth_test_scope;
};
// HTTP transport that answers metadata-server requests via
// MetadataMockHelper and everything else via the registered mock GCS
// buckets, trying each bucket in order until one accepts the request.
class MyMockTransport : public HttpTransport {
 public:
  void IssueRequestWithHandler(const HttpRequest& request,
                               IssueRequestOptions options,
                               HttpResponseHandler* response_handler) override {
    ApplyResponseToHandler(
        [&]() -> Result<HttpResponse> {
          auto result = metadata_mock_.GetResponse(request);
          if (result.ok()) return result;
          // Not a metadata request: offer it to each mock bucket in turn.
          for (auto* bucket : buckets_) {
            result = bucket->IssueRequest(request, options.payload);
            if (result.ok()) break;
          }
          return result;
        }(),
        response_handler);
  }
  MetadataMockHelper metadata_mock_;
  std::vector<GCSMockStorageBucket*> buckets_;  // Not owned.
};
// RAII helper that installs `transport` as the process-wide default HTTP
// transport (and resets the cached auth provider so it is re-created against
// the mock), undoing both on destruction.
struct DefaultHttpTransportSetter {
  DefaultHttpTransportSetter(std::shared_ptr<HttpTransport> transport) {
    SetDefaultHttpTransport(transport);
    tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
  }
  ~DefaultHttpTransportSetter() {
    tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
    SetDefaultHttpTransport(nullptr);
  }
};
// Returns a Context configured with fast, bounded retries so tests that
// exercise error paths do not wait on long backoff delays.
Context DefaultTestContext() {
  auto spec = Context::Spec::FromJson({{"gcs_request_retries",
                                        {{"max_retries", 4},
                                         {"initial_delay", "1ms"},
                                         {"max_delay", "5ms"}}}})
                  .value();
  return Context{spec};
}
// Verifies bucket-name validation at Open time: malformed names (too short,
// bad leading/trailing underscore, uppercase, dotted components that are
// empty/invalid/too long) are rejected, while valid names open successfully.
TEST(GcsKeyValueStoreTest, BadBucketNames) {
  auto context = DefaultTestContext();
  for (auto bucket :
       {"a", "_abc", "abc_", "ABC", "a..b", "a.-.b",
        "a."
        "0123456789123456789012345678912345678901234567891234567890"
        "1234567891234567890123456789123456789012345678912345678901"
        "23456789123456789.b"}) {
    EXPECT_FALSE(
        kvstore::Open({{"driver", kDriver}, {"bucket", bucket}}, context)
            .result())
        << "bucket: " << bucket;
  }
  for (auto bucket : {"abc", "abc.1-2_3.abc"}) {
    EXPECT_TRUE(
        kvstore::Open({{"driver", kDriver}, {"bucket", bucket}}, context)
            .result())
        << "bucket: " << bucket;
  }
}
// Verifies object-name and option validation on Read: reserved names,
// control characters, and a malformed StorageGeneration condition all yield
// kInvalidArgument without issuing a request.
TEST(GcsKeyValueStoreTest, BadObjectNames) {
  auto mock_transport = std::make_shared<MyMockTransport>();
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  GCSMockStorageBucket bucket("my-bucket");
  mock_transport->buckets_.push_back(&bucket);
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
          .result());
  EXPECT_THAT(kvstore::Read(store, ".").result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(kvstore::Read(store, "..").result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(kvstore::Read(store, ".well-known/acme-challenge").result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(kvstore::Read(store, "foo\nbar").result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(kvstore::Read(store, "foo\rbar").result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  {
    // if_not_equal built from an arbitrary string is not a valid GCS
    // (uint64) generation.
    kvstore::ReadOptions options;
    options.generation_conditions.if_not_equal =
        StorageGeneration::FromString("abc123");
    EXPECT_THAT(kvstore::Read(store, "abc", options).result(),
                MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
}
// Smoke test: opens a store against a mock bucket, checks the spec
// round-trips to minimal JSON, and runs the shared read/write conformance
// suite.
TEST(GcsKeyValueStoreTest, Basic) {
  auto mock_transport = std::make_shared<MyMockTransport>();
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  GCSMockStorageBucket bucket("my-bucket");
  mock_transport->buckets_.push_back(&bucket);
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
          .result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, store.spec());
  EXPECT_THAT(spec.ToJson(tensorstore::IncludeDefaults{false}),
              ::testing::Optional(
                  MatchesJson({{"driver", kDriver}, {"bucket", "my-bucket"}})));
  tensorstore::internal::TestKeyValueReadWriteOps(store);
}
// Exercises the retry policy: with `max_retries` configured, injecting more
// errors than the retry budget fails the read (kAborted), while injecting
// fewer lets the read eventually succeed.
TEST(GcsKeyValueStoreTest, Retry) {
  for (int max_retries : {2, 3, 4}) {
    for (bool fail : {false, true}) {
      ABSL_LOG(INFO) << max_retries << (fail ? " fail" : " success");
      auto mock_transport = std::make_shared<MyMockTransport>();
      DefaultHttpTransportSetter mock_transport_setter{mock_transport};
      GCSMockStorageBucket bucket("my-bucket");
      mock_transport->buckets_.push_back(&bucket);
      auto context = Context::Default();
      TENSORSTORE_ASSERT_OK_AND_ASSIGN(
          auto store, kvstore::Open({{"driver", kDriver},
                                     {"bucket", "my-bucket"},
                                     {"context",
                                      {
                                          {"gcs_request_retries",
                                           {{"max_retries", max_retries},
                                            {"initial_delay", "1ms"},
                                            {"max_delay", "10ms"}}},
                                      }}},
                                    context)
                          .result());
      if (fail) {
        // One more error than the budget allows: the read must fail.
        bucket.TriggerErrors(max_retries + 1);
        EXPECT_THAT(kvstore::Read(store, "x").result(),
                    MatchesStatus(absl::StatusCode::kAborted));
      } else {
        // Fewer errors than the budget: the retries absorb them.
        bucket.TriggerErrors(max_retries - 2);
        TENSORSTORE_EXPECT_OK(kvstore::Read(store, "x").result());
      }
    }
  }
}
// Exercises List: empty bucket, full listing, prefix-restricted listing, and
// cancellation both before any entry (CancelOnStartingReceiver) and after a
// couple of entries (CancelAfterNReceiver).
TEST(GcsKeyValueStoreTest, List) {
  auto mock_transport = std::make_shared<MyMockTransport>();
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  GCSMockStorageBucket bucket("my-bucket");
  mock_transport->buckets_.push_back(&bucket);
  auto context = DefaultTestContext();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
          .result());
  {
    // Listing an empty bucket produces no set_value calls.
    absl::Notification notification;
    std::vector<std::string> log;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        CompletionNotifyingReceiver{&notification,
                                    tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
                                            "set_stopping"));
  }
  EXPECT_THAT(ListFuture(store, {}).result(),
              ::testing::Optional(::testing::ElementsAre()));
  // Populate the bucket.
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
  {
    // Full listing returns every key (order unspecified).
    absl::Notification notification;
    std::vector<std::string> log;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        CompletionNotifyingReceiver{&notification,
                                    tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(
        log, ::testing::UnorderedElementsAre(
                 "set_starting", "set_value: a/d", "set_value: a/c/z/f",
                 "set_value: a/c/y", "set_value: a/c/z/e", "set_value: a/c/x",
                 "set_value: a/b", "set_done", "set_stopping"));
  }
  EXPECT_THAT(ListFuture(store, {}).result(),
              ::testing::Optional(::testing::UnorderedElementsAre(
                  MatchesListEntry("a/d"), MatchesListEntry("a/c/z/f"),
                  MatchesListEntry("a/c/y"), MatchesListEntry("a/c/z/e"),
                  MatchesListEntry("a/c/x"), MatchesListEntry("a/b"))));
  {
    // Prefix-restricted listing.
    absl::Notification notification;
    std::vector<std::string> log;
    tensorstore::execution::submit(
        kvstore::List(store, {KeyRange::Prefix("a/c/")}),
        CompletionNotifyingReceiver{&notification,
                                    tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::UnorderedElementsAre(
                         "set_starting", "set_value: a/c/z/f",
                         "set_value: a/c/y", "set_value: a/c/z/e",
                         "set_value: a/c/x", "set_done", "set_stopping"));
  }
  {
    // Cancellation during set_starting yields no entries.
    absl::Notification notification;
    std::vector<std::string> log;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        CompletionNotifyingReceiver{
            &notification, tensorstore::CancelOnStartingReceiver{{&log}}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
                                            "set_stopping"));
  }
  {
    // Cancellation after two entries: the stream still terminates cleanly
    // and at least one entry was observed.
    absl::Notification notification;
    std::vector<std::string> log;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        CompletionNotifyingReceiver{
            &notification, tensorstore::CancelAfterNReceiver<2>{{&log}}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::Contains("set_starting"));
    EXPECT_THAT(log, ::testing::Contains("set_done"));
    EXPECT_THAT(log, ::testing::Contains("set_stopping"));
    EXPECT_LE(4, log.size());
    EXPECT_THAT(
        log, ::testing::Contains(::testing::AnyOf(
                 "set_value: a/d", "set_value: a/c/z/f", "set_value: a/c/y",
                 "set_value: a/c/z/e", "set_value: a/c/x", "set_value: a/b")));
  }
  EXPECT_THAT(ListFuture(store, {KeyRange::Prefix("a/c/")}).result(),
              ::testing::Optional(::testing::UnorderedElementsAre(
                  MatchesListEntry("a/c/z/f"), MatchesListEntry("a/c/y"),
                  MatchesListEntry("a/c/z/e"), MatchesListEntry("a/c/x"))));
}
// Verifies that the driver spec round-trips through JSON via the shared
// kvstore spec round-trip test helper.
TEST(GcsKeyValueStoreTest, SpecRoundtrip) {
  auto mock_transport = std::make_shared<MyMockTransport>();
  DefaultHttpTransportSetter mock_transport_setter{mock_transport};
  GCSMockStorageBucket bucket("my-bucket");
  mock_transport->buckets_.push_back(&bucket);
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  options.full_spec = {{"driver", kDriver}, {"bucket", "my-bucket"}};
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(GcsKeyValueStoreTest, InvalidSpec) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
EXPECT_THAT(
kvstore::Open(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"extra", "key"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Open({{"driver", kDriver}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", kDriver}, {"bucket", 5}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "a\tb"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsKeyValueStoreTest, RequestorPays) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket1("my-bucket1");
GCSMockStorageBucket bucket2("my-bucket2", "myproject");
mock_transport->buckets_.push_back(&bucket1);
mock_transport->buckets_.push_back(&bucket2);
const auto TestWrite = [&](Context context, auto bucket2_status_matcher) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open({{"driver", kDriver},
{"bucket", "my-bucket1"},
{"context",
{
{"gcs_request_retries",
{{"max_retries", 3},
{"initial_delay", "1ms"},
{"max_delay", "10ms"}}},
}}},
context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open({{"driver", kDriver},
{"bucket", "my-bucket2"},
{"context",
{
{"gcs_request_retries",
{{"max_retries", 3},
{"initial_delay", "1ms"},
{"max_delay", "10ms"}}},
}}},
context)
.result());
TENSORSTORE_EXPECT_OK(kvstore::Write(store1, "abc", absl::Cord("xyz")));
EXPECT_THAT(kvstore::Write(store2, "abc", absl::Cord("xyz")).status(),
bucket2_status_matcher);
};
TestWrite(Context::Default(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
TestWrite(Context(Context::Spec::FromJson(
{{"gcs_user_project", {{"project_id", "badproject"}}}})
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument));
TestWrite(Context(Context::Spec::FromJson(
{{"gcs_user_project", {{"project_id", "myproject"}}}})
.value()),
absl::OkStatus());
}
TEST(GcsKeyValueStoreTest, DeletePrefix) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(GcsKeyValueStoreTest, DeleteRange) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
bucket.SetErrorRate(0.02);
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(GcsKeyValueStoreTest, DeleteRangeToEnd) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(GcsKeyValueStoreTest, DeleteRangeFromBeginning) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
class MyDeleteRangeCancellationMockTransport : public MyMockTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
if (request.method == "DELETE") {
cancellation_notification_.WaitForNotification();
++total_delete_requests_;
}
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
}
std::atomic<size_t> total_delete_requests_{0};
absl::Notification cancellation_notification_;
};
TEST(GcsKeyValueStoreTest, DeleteRangeCancellation) {
auto mock_transport =
std::make_shared<MyDeleteRangeCancellationMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
{"context", {{"gcs_request_concurrency", {{"limit", 1}}}}},
},
context)
.result());
for (std::string key : {"a/b", "a/c/a", "a/c/b", "a/c/d", "a/d"}) {
TENSORSTORE_ASSERT_OK(kvstore::Write(store, key, absl::Cord()));
}
{
[[maybe_unused]] auto future =
kvstore::DeleteRange(store, tensorstore::KeyRange{"a/ba", "a/ca"});
}
mock_transport->cancellation_notification_.Notify();
absl::SleepFor(absl::Milliseconds(100));
EXPECT_GE(1, mock_transport->total_delete_requests_.load());
EXPECT_THAT(ListFuture(store).result(),
::testing::Optional(::testing::SizeIs(::testing::Ge(4))));
}
class MyConcurrentMockTransport : public MyMockTransport {
public:
size_t reset() {
absl::MutexLock lock(&concurrent_request_mutex_);
cur_concurrent_requests_ = 0;
return std::exchange(max_concurrent_requests_, 0);
}
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal/")) {
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
return;
}
{
absl::MutexLock lock(&concurrent_request_mutex_);
++cur_concurrent_requests_;
max_concurrent_requests_ =
std::max(max_concurrent_requests_, cur_concurrent_requests_);
}
auto op = tensorstore::PromiseFuturePair<HttpResponse>::Make();
ScheduleAt(absl::Now() + absl::Milliseconds(5),
[this, r = request, o = std::move(options), response_handler] {
absl::MutexLock lock(&concurrent_request_mutex_);
--cur_concurrent_requests_;
MyMockTransport::IssueRequestWithHandler(r, std::move(o),
response_handler);
});
}
size_t cur_concurrent_requests_ = 0;
size_t max_concurrent_requests_ = 0;
absl::Mutex concurrent_request_mutex_;
};
TEST(GcsKeyValueStoreTest, Concurrency) {
auto mock_transport = std::make_shared<MyConcurrentMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
const auto TestConcurrency = [&](size_t limit) {
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
{"context",
{{"gcs_request_concurrency", {{"limit", limit}}}}}
},
context)
.result());
std::vector<tensorstore::Future<kvstore::ReadResult>> futures;
for (size_t i = 0; i < 10 * limit; ++i) {
futures.push_back(kvstore::Read(store, "abc"));
}
for (const auto& future : futures) {
future.Wait();
}
};
TestConcurrency(1);
EXPECT_EQ(1, mock_transport->reset());
TestConcurrency(2);
EXPECT_EQ(2, mock_transport->reset());
TestConcurrency(3);
EXPECT_EQ(3, mock_transport->reset());
}
class MyRateLimitedMockTransport : public MyMockTransport {
public:
std::tuple<absl::Time, absl::Time, size_t> reset() {
absl::MutexLock l(&request_timing_mutex_);
return {min_time_, max_time_, std::exchange(count_, 0)};
}
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal/")) {
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
return;
}
{
absl::MutexLock l(&request_timing_mutex_);
max_time_ = absl::Now();
if (count_++ == 0) {
min_time_ = max_time_;
}
}
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
}
absl::Time min_time_;
absl::Time max_time_;
size_t count_;
absl::Mutex request_timing_mutex_;
};
TEST(GcsKeyValueStoreTest, RateLimited) {
auto mock_transport = std::make_shared<MyRateLimitedMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
const auto TestRateLimiting = [&](size_t limit) {
tensorstore::Context context{
tensorstore::Context::Spec::FromJson(
{
{"gcs_request_concurrency", {{"limit", 128}}},
{"data_copy_concurrency", {{"limit", 128}}},
{"experimental_gcs_rate_limiter",
{{"read_rate", limit},
{"write_rate", limit},
{"doubling_time", "20m"}}},
})
.value()};
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
},
context)
.result());
kvstore::Read(store, "xyz").Wait();
mock_transport->reset();
std::vector<tensorstore::Future<kvstore::ReadResult>> futures;
for (size_t i = 0; i < 100; ++i) {
futures.push_back(kvstore::Read(store, "abc"));
}
for (const auto& future : futures) {
future.Wait();
}
auto t = mock_transport->reset();
return std::get<1>(t) - std::get<0>(t);
};
[[maybe_unused]] auto a = TestRateLimiting(10);
[[maybe_unused]] auto b = TestRateLimiting(1000);
#if 0
EXPECT_THAT(b, testing::Lt(a));
#endif
}
TEST(GcsKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "abc"}},
"gs:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "abc def"}},
"gs:
}
TEST(GcsKeyValueStoreTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Invalid GCS bucket name: \"bucket:xyz\""));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsKeyValueStoreTest, BatchRead) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::BatchReadGenericCoalescingTestOptions options;
options.coalescing_options = tensorstore::internal_kvstore_batch::
kDefaultRemoteStorageCoalescingOptions;
options.metric_prefix = "/tensorstore/kvstore/gcs/";
tensorstore::internal::TestBatchReadGenericCoalescing(store, options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
12e48cfe-031f-436c-9713-e2ad67992e60 | cpp | google/tensorstore | object_metadata | tensorstore/kvstore/gcs_http/object_metadata.cc | tensorstore/kvstore/gcs_http/object_metadata_test.cc | #include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include <stdint.h>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/http/http_header.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_kvstore_gcs_http {
using ::tensorstore::internal_http::TryParseIntHeader;
using ::tensorstore::internal_json_binding::DefaultInitializedValue;
namespace jb = tensorstore::internal_json_binding;
inline constexpr auto ObjectMetadataBinder = jb::Object(
jb::Member("name", jb::Projection(&ObjectMetadata::name)),
jb::Member("md5Hash", jb::Projection(&ObjectMetadata::md5_hash,
DefaultInitializedValue())),
jb::Member("crc32c", jb::Projection(&ObjectMetadata::crc32c,
DefaultInitializedValue())),
jb::Member("size", jb::Projection(&ObjectMetadata::size,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("generation", jb::Projection(&ObjectMetadata::generation,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("metageneration", jb::Projection(&ObjectMetadata::metageneration,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("timeCreated", jb::Projection(&ObjectMetadata::time_created,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::Member("updated", jb::Projection(&ObjectMetadata::updated,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::Member("timeDeleted", jb::Projection(&ObjectMetadata::time_deleted,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::DiscardExtraMembers);
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ObjectMetadata,
[](auto is_loading, const auto& options,
auto* obj, ::nlohmann::json* j) {
return ObjectMetadataBinder(
is_loading, options, obj, j);
})
void SetObjectMetadataFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers,
ObjectMetadata* result) {
result->size =
TryParseIntHeader<uint64_t>(headers, "content-length").value_or(0);
result->generation =
TryParseIntHeader<int64_t>(headers, "x-goog-generation").value_or(0);
result->metageneration =
TryParseIntHeader<uint64_t>(headers, "x-goog-metageneration").value_or(0);
auto it = headers.find("x-goog-hash");
if (it != headers.end()) {
for (std::string_view kv : absl::StrSplit(it->second, absl::ByChar(','))) {
std::pair<std::string_view, std::string_view> split =
absl::StrSplit(kv, absl::MaxSplits('=', 1));
if (split.first == "crc32c") {
result->crc32c = std::string(split.second);
} else if (split.first == "md5") {
result->md5_hash = std::string(split.second);
}
}
}
}
Result<ObjectMetadata> ParseObjectMetadata(std::string_view source) {
auto json = internal::ParseJson(source);
if (json.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Failed to parse object metadata: ", source));
}
return jb::FromJson<ObjectMetadata>(std::move(json));
}
}
} | #include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_kvstore_gcs_http::ParseObjectMetadata;
const char kObjectMetadata[] = R"""({
"acl": [{
"kind": "storage#objectAccessControl",
"id": "acl-id-0",
"selfLink": "https:
"bucket": "foo-bar",
"object": "foo",
"generation": 12345,
"entity": "user-qux",
"role": "OWNER",
"email": "qux@example.com",
"entityId": "user-qux-id-123",
"domain": "example.com",
"projectTeam": {
"projectNumber": "4567",
"team": "owners"
},
"etag": "AYX="
}, {
"kind": "storage#objectAccessControl",
"id": "acl-id-1",
"selfLink": "https:
"bucket": "foo-bar",
"object": "foo",
"generation": 12345,
"entity": "user-quux",
"role": "READER",
"email": "qux@example.com",
"entityId": "user-quux-id-123",
"domain": "example.com",
"projectTeam": {
"projectNumber": "4567",
"team": "viewers"
},
"etag": "AYX="
}
],
"bucket": "foo-bar",
"cacheControl": "no-cache",
"componentCount": 7,
"contentDisposition": "a-disposition",
"contentEncoding": "an-encoding",
"contentLanguage": "a-language",
"contentType": "application/octet-stream",
"crc32c": "deadbeef",
"customerEncryption": {
"encryptionAlgorithm": "some-algo",
"keySha256": "abc123"
},
"etag": "XYZ=",
"eventBasedHold": true,
"generation": "12345",
"id": "foo-bar/baz/12345",
"kind": "storage#object",
"kmsKeyName": "/foo/bar/baz/key",
"md5Hash": "deaderBeef=",
"mediaLink": "https:
"metadata": {
"foo": "bar",
"baz": "qux"
},
"metageneration": "4",
"name": "baz",
"owner": {
"entity": "user-qux",
"entityId": "user-qux-id-123"
},
"retentionExpirationTime": "2019-01-01T00:00:00Z",
"selfLink": "https:
"size": 102400,
"storageClass": "STANDARD",
"temporaryHold": true,
"timeCreated": "2018-05-19T19:31:14Z",
"timeDeleted": "2018-05-19T19:32:24Z",
"timeStorageClassUpdated": "2018-05-19T19:31:34Z",
"updated": "2018-05-19T19:31:24Z"
})""";
absl::Time AsTime(const std::string& time) {
absl::Time result;
if (absl::ParseTime(absl::RFC3339_full, time, &result, nullptr)) {
return result;
}
return absl::InfinitePast();
}
TEST(ParseObjectMetadata, Basic) {
EXPECT_FALSE(ParseObjectMetadata("").ok());
auto result = ParseObjectMetadata(kObjectMetadata);
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("baz", result->name);
EXPECT_EQ("deaderBeef=", result->md5_hash);
EXPECT_EQ(102400u, result->size);
EXPECT_EQ(12345, result->generation);
EXPECT_EQ(4, result->metageneration);
EXPECT_EQ(AsTime("2018-05-19T12:31:14-07:00"), result->time_created);
EXPECT_EQ(AsTime("2018-05-19T12:31:24-07:00"), result->updated);
EXPECT_EQ(AsTime("2018-05-19T12:32:24-07:00"), result->time_deleted);
}
const char kObjectMetadata2[] = R"""({
"name": "fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960",
"kind": "storage#object",
"id": "neuroglancer-fafb-data/fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960/1540426531840872",
"bucket": "neuroglancer-fafb-data",
"generation": "1540426531840872",
"contentType": "image/jpeg",
"timeCreated": "2018-10-25T00:15:31.840Z",
"updated": "2018-10-25T00:15:31.840Z",
"timeStorageClassUpdated": "2018-10-25T00:15:31.840Z",
"size": "3404"
})""";
TEST(ParseObjectMetadata, Example2) {
EXPECT_FALSE(ParseObjectMetadata("").ok());
auto result = ParseObjectMetadata(kObjectMetadata2);
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960",
result->name);
EXPECT_EQ(3404u, result->size);
EXPECT_EQ(1540426531840872, result->generation);
EXPECT_EQ(AsTime("2018-10-24T17:15:31.84-07:00"), result->time_created);
EXPECT_EQ(AsTime("2018-10-24T17:15:31.84-07:00"), result->updated);
EXPECT_EQ(0, result->metageneration);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/object_metadata.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/object_metadata_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
fe75d615-573a-4be2-9609-b996505d1261 | cpp | google/tensorstore | memory_key_value_store | tensorstore/kvstore/memory/memory_key_value_store.cc | tensorstore/kvstore/memory/memory_key_value_store_test.cc | #include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::kvstore::SupportedFeatures;
TimestampedStorageGeneration GenerationNow(StorageGeneration generation) {
return TimestampedStorageGeneration{std::move(generation), absl::Now()};
}
struct StoredKeyValuePairs
: public internal::AtomicReferenceCount<StoredKeyValuePairs> {
using Ptr = internal::IntrusivePtr<StoredKeyValuePairs>;
struct ValueWithGenerationNumber {
absl::Cord value;
uint64_t generation_number;
StorageGeneration generation() const {
return StorageGeneration::FromUint64(generation_number);
}
};
using Map = absl::btree_map<std::string, ValueWithGenerationNumber>;
std::pair<Map::iterator, Map::iterator> Find(const std::string& inclusive_min,
const std::string& exclusive_max)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return {values.lower_bound(inclusive_min),
exclusive_max.empty() ? values.end()
: values.lower_bound(exclusive_max)};
}
std::pair<Map::iterator, Map::iterator> Find(const KeyRange& range)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return Find(range.inclusive_min, range.exclusive_max);
}
absl::Mutex mutex;
uint64_t next_generation_number ABSL_GUARDED_BY(mutex) = 0;
Map values ABSL_GUARDED_BY(mutex);
};
struct MemoryKeyValueStoreResource
: public internal::ContextResourceTraits<MemoryKeyValueStoreResource> {
constexpr static char id[] = "memory_key_value_store";
struct Spec {};
using Resource = StoredKeyValuePairs::Ptr;
static Spec Default() { return {}; }
static constexpr auto JsonBinder() { return jb::Object(); }
static Result<Resource> Create(
Spec, internal::ContextResourceCreationContext context) {
return StoredKeyValuePairs::Ptr(new StoredKeyValuePairs);
}
static Spec GetSpec(const Resource&,
const internal::ContextSpecBuilder& builder) {
return {};
}
};
const internal::ContextResourceRegistration<MemoryKeyValueStoreResource>
resource_registration;
struct MemoryDriverSpecData {
Context::Resource<MemoryKeyValueStoreResource> memory_key_value_store;
bool atomic = true;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.memory_key_value_store, x.atomic);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(
MemoryKeyValueStoreResource::id,
jb::Projection<&MemoryDriverSpecData::memory_key_value_store>()),
jb::Member("atomic", jb::Projection<&MemoryDriverSpecData::atomic>(
jb::DefaultValue([](auto* y) { *y = true; }))));
};
class MemoryDriverSpec
: public internal_kvstore::RegisteredDriverSpec<MemoryDriverSpec,
MemoryDriverSpecData> {
public:
static constexpr char id[] = "memory";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
return tensorstore::StrCat(id, ":
}
};
class MemoryDriver
: public internal_kvstore::RegisteredDriver<MemoryDriver,
MemoryDriverSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
Future<const void> DeleteRange(KeyRange range) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override;
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) override;
class TransactionNode;
StoredKeyValuePairs& data() { return **spec_.memory_key_value_store; }
absl::Status GetBoundSpecData(MemoryDriverSpecData& spec) const {
spec = spec_;
return absl::Status();
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
SpecData spec_;
};
Future<kvstore::DriverPtr> MemoryDriverSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<MemoryDriver>();
driver->spec_ = data_;
return driver;
}
using BufferedReadModifyWriteEntry =
internal_kvstore::AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry;
class MemoryDriver::TransactionNode
: public internal_kvstore::AtomicTransactionNode {
using Base = internal_kvstore::AtomicTransactionNode;
public:
using Base::Base;
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override
ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (!single_phase_mutation.remaining_entries_.HasError()) {
auto& data = static_cast<MemoryDriver&>(*this->driver()).data();
TimestampedStorageGeneration generation;
UniqueWriterLock lock(data.mutex);
absl::Time commit_time = absl::Now();
if (!ValidateEntryConditions(data, single_phase_mutation, commit_time)) {
lock.unlock();
this->RetryAtomicWriteback(commit_time);
return;
}
ApplyMutation(data, single_phase_mutation, commit_time);
lock.unlock();
this->AtomicCommitWritebackSuccess();
} else {
internal_kvstore::WritebackError(single_phase_mutation);
}
MultiPhaseMutation::AllEntriesDone(single_phase_mutation);
}
static bool ValidateEntryConditions(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
bool validated = true;
for (auto& entry : single_phase_mutation.entries_) {
if (!ValidateEntryConditions(data, entry, commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
internal_kvstore::MutationEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
if (entry.entry_type() == kReadModifyWrite) {
return ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(entry), commit_time);
}
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
bool validated = true;
for (auto& deleted_entry : dr_entry.superseded_) {
if (!ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(deleted_entry),
commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
BufferedReadModifyWriteEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
auto& stamp = entry.stamp();
auto if_equal = StorageGeneration::Clean(stamp.generation);
if (StorageGeneration::IsUnknown(if_equal)) {
assert(stamp.time == absl::InfiniteFuture());
return true;
}
auto it = data.values.find(entry.key_);
if (it == data.values.end()) {
if (StorageGeneration::IsNoValue(if_equal)) {
stamp.time = commit_time;
return true;
}
} else if (if_equal == it->second.generation()) {
stamp.time = commit_time;
return true;
}
return false;
}
static void ApplyMutation(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(data.mutex) {
for (auto& entry : single_phase_mutation.entries_) {
if (entry.entry_type() == kReadModifyWrite) {
auto& rmw_entry = static_cast<BufferedReadModifyWriteEntry&>(entry);
auto& stamp = rmw_entry.stamp();
stamp.time = commit_time;
auto value_state = rmw_entry.value_state_;
if (!StorageGeneration::IsDirty(stamp.generation)) {
} else if (value_state == ReadResult::kMissing) {
data.values.erase(rmw_entry.key_);
stamp.generation = StorageGeneration::NoValue();
} else {
assert(value_state == ReadResult::kValue);
auto& v = data.values[rmw_entry.key_];
v.generation_number = data.next_generation_number++;
v.value = std::move(rmw_entry.value_);
stamp.generation = v.generation();
}
} else {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto it_range = data.Find(dr_entry.key_, dr_entry.exclusive_max_);
data.values.erase(it_range.first, it_range.second);
}
}
}
};
Future<ReadResult> MemoryDriver::Read(Key key, ReadOptions options) {
auto& data = this->data();
absl::ReaderMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
return ReadResult::Missing(GenerationNow(StorageGeneration::NoValue()));
}
auto stamp = GenerationNow(it->second.generation());
if (!options.generation_conditions.Matches(it->second.generation())) {
return ReadResult::Unspecified(std::move(stamp));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range, options.byte_range.Validate(it->second.value.size()));
return ReadResult::Value(internal::GetSubCord(it->second.value, byte_range),
std::move(stamp));
}
Future<TimestampedStorageGeneration> MemoryDriver::Write(
Key key, std::optional<Value> value, WriteOptions options) {
using ValueWithGenerationNumber =
StoredKeyValuePairs::ValueWithGenerationNumber;
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
if (!options.generation_conditions.MatchesNoValue()) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
return GenerationNow(StorageGeneration::NoValue());
}
it = values
.emplace(std::move(key),
ValueWithGenerationNumber{*std::move(value),
data.next_generation_number++})
.first;
return GenerationNow(it->second.generation());
}
if (!options.generation_conditions.Matches(it->second.generation())) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
values.erase(it);
return GenerationNow(StorageGeneration::NoValue());
}
it->second.generation_number = data.next_generation_number++;
it->second.value = *std::move(value);
return GenerationNow(it->second.generation());
}
Future<const void> MemoryDriver::DeleteRange(KeyRange range) {
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
if (!range.empty()) {
auto it_range = data.Find(range);
data.values.erase(it_range.first, it_range.second);
}
return absl::OkStatus();
}
// Lists keys within `options.range`, stripping `options.strip_prefix_length`
// bytes from each key before delivery.
//
// Entries are snapshotted under the reader lock first, then delivered to
// `receiver` *without* the lock held, so receiver callbacks may re-enter the
// driver safely.  Cancellation (set via set_starting) is polled both while
// snapshotting and while delivering.
void MemoryDriver::ListImpl(ListOptions options, ListReceiver receiver) {
  auto& data = this->data();
  std::atomic<bool> cancelled{false};
  execution::set_starting(receiver, [&cancelled] {
    cancelled.store(true, std::memory_order_relaxed);
  });
  std::vector<ListEntry> entries;
  {
    absl::ReaderMutexLock lock(&data.mutex);
    auto it_range = data.Find(options.range);
    for (auto it = it_range.first; it != it_range.second; ++it) {
      if (cancelled.load(std::memory_order_relaxed)) break;
      std::string_view key = it->first;
      entries.push_back(ListEntry{
          // Guard against a prefix length longer than the key itself.
          std::string(
              key.substr(std::min(options.strip_prefix_length, key.size()))),
          ListEntry::checked_size(it->second.value.size()),
      });
    }
  }
  for (auto& entry : entries) {
    if (cancelled.load(std::memory_order_relaxed)) break;
    execution::set_value(receiver, std::move(entry));
  }
  // Always finish with done + stopping, even when cancelled.
  execution::set_done(receiver);
  execution::set_stopping(receiver);
}
// Transactional read-modify-write entry point.
//
// When the driver was opened with `atomic=false`, falls back to the generic
// non-atomic base implementation; otherwise registers the operation with the
// driver's atomic TransactionNode.
absl::Status MemoryDriver::ReadModifyWrite(
    internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
    ReadModifyWriteSource& source) {
  if (!spec_.atomic) {
    return Driver::ReadModifyWrite(transaction, phase, std::move(key), source);
  }
  return internal_kvstore::AddReadModifyWrite<TransactionNode>(
      this, transaction, phase, std::move(key), source);
}
// Transactional DeleteRange: mirrors ReadModifyWrite above — generic base
// implementation when non-atomic, TransactionNode-backed otherwise.
absl::Status MemoryDriver::TransactionalDeleteRange(
    const internal::OpenTransactionPtr& transaction, KeyRange range) {
  if (!spec_.atomic) {
    return Driver::TransactionalDeleteRange(transaction, std::move(range));
  }
  return internal_kvstore::AddDeleteRange<TransactionNode>(this, transaction,
                                                           std::move(range));
}
// Parses a "memory:" URL into a kvstore::Spec.
//
// The caller (URL-scheme registry) guarantees the scheme matches, hence the
// assert rather than an error.  Query strings and fragments are rejected;
// the authority+path component is percent-decoded and becomes the kvstore
// path.  The spec is bound to the *default* memory_key_value_store resource.
Result<kvstore::Spec> ParseMemoryUrl(std::string_view url) {
  auto parsed = internal::ParseGenericUri(url);
  assert(parsed.scheme == tensorstore::MemoryDriverSpec::id);
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  auto driver_spec = internal::MakeIntrusivePtr<MemoryDriverSpec>();
  driver_spec->data_.memory_key_value_store =
      Context::Resource<MemoryKeyValueStoreResource>::DefaultSpec();
  return {std::in_place, std::move(driver_spec),
          internal::PercentDecode(parsed.authority_and_path)};
}
}
// Returns a standalone in-memory kvstore driver (used primarily for tests).
//
// The driver is backed by a resource from the *default* context, so each
// call yields an independent store.  `atomic` selects whether transactional
// operations use the atomic TransactionNode path.
kvstore::DriverPtr GetMemoryKeyValueStore(bool atomic) {
  auto ptr = internal::MakeIntrusivePtr<MemoryDriver>();
  ptr->spec_.memory_key_value_store =
      Context::Default().GetResource<MemoryKeyValueStoreResource>().value();
  ptr->spec_.atomic = atomic;
  return ptr;
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::MemoryDriver)
namespace {
// Static registration: makes the "memory" driver available via its JSON
// spec and the "memory:" URL scheme at program startup.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::MemoryDriverSpec>
    registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
    url_scheme_registration{tensorstore::MemoryDriverSpec::id,
                            tensorstore::ParseMemoryUrl};
} | #include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::serialization::SerializationRoundTrip;
// The following tests delegate to the shared kvstore conformance helpers in
// test_util.h, each exercising one operation family against a fresh store.
TEST(MemoryKeyValueStoreTest, Basic) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(MemoryKeyValueStoreTest, DeletePrefix) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRange) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeToEnd) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeFromBeginning) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
// Disabled: CopyRange is not currently exercised for the memory driver.
#if 0
TEST(MemoryKeyValueStoreTest, CopyRange) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreCopyRange(store);
}
#endif
TEST(MemoryKeyValueStoreTest, List) {
  auto store = tensorstore::GetMemoryKeyValueStore();
  tensorstore::internal::TestKeyValueStoreList(store);
}
// Verifies that stores opened from the same context share data, while a
// store bound to a different memory_key_value_store resource does not, and
// that data persists for the lifetime of the context.
TEST(MemoryKeyValueStoreTest, Open) {
  auto context = Context::Default();
  {
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto store, kvstore::Open({{"driver", "memory"}}, context).result());
    TENSORSTORE_ASSERT_OK(kvstore::Write(store, "key", absl::Cord("value")));
    {
      // Same context => same underlying map; the write is visible.
      TENSORSTORE_ASSERT_OK_AND_ASSIGN(
          auto store2, kvstore::Open({{"driver", "memory"}}, context).result());
      EXPECT_THAT(kvstore::Read(store2, "key").result(),
                  MatchesKvsReadResult(absl::Cord("value")));
    }
    // A child context with its own resource gets an independent, empty map.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto other_context, Context::FromJson({{"memory_key_value_store",
                                                ::nlohmann::json::object_t{}}},
                                              context));
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto store3,
        kvstore::Open({{"driver", "memory"}}, other_context).result());
    EXPECT_THAT(kvstore::Read(store3, "key").result(),
                MatchesKvsReadResultNotFound());
  }
  {
    // Re-opening from the original context still sees the earlier write.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto store, kvstore::Open({{"driver", "memory"}}, context).result());
    EXPECT_EQ("value", kvstore::Read(store, "key").value().value);
  }
}
// Listing works correctly when the store has a non-empty path prefix.
TEST(MemoryKeyValueStoreTest, ListWithPath) {
  auto context = Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store,
      kvstore::Open({{"driver", "memory"}, {"path", "p/"}}, context).result());
  tensorstore::internal::TestKeyValueStoreList(store);
}
// Spec -> JSON -> Spec roundtrip for the minimal spec.  Data cannot survive
// serialization of an unbound memory spec, hence the disabled check.
TEST(MemoryKeyValueStoreTest, SpecRoundtrip) {
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  options.full_spec = {
      {"driver", "memory"},
  };
  options.check_data_after_serialization = false;
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
// Roundtrip of a spec carrying an explicit named context resource.
TEST(MemoryKeyValueStoreTest, SpecRoundtripWithContextSpec) {
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  options.spec_request_options.Set(tensorstore::unbind_context).IgnoreError();
  options.full_spec = {
      {"driver", "memory"},
      {"memory_key_value_store", "memory_key_value_store#a"},
      {"context",
       {
           {"memory_key_value_store#a", ::nlohmann::json::object_t()},
       }},
  };
  options.check_data_persists = false;
  options.check_data_after_serialization = false;
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
// Unknown JSON members in the spec must be rejected.
TEST(MemoryKeyValueStoreTest, InvalidSpec) {
  auto context = tensorstore::Context::Default();
  EXPECT_THAT(
      kvstore::Open({{"driver", "memory"}, {"extra", "key"}}, context).result(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Verifies that cache keys derived from bound specs and opened drivers
// agree exactly when the same context resource is used, and differ when a
// different (default) context is used.
TEST(MemoryKeyValueStoreTest, BoundSpec) {
  auto context = tensorstore::Context::Default();
  ::nlohmann::json json_spec{{"driver", "memory"}};
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
                                   kvstore::Spec::FromJson(json_spec));
  TENSORSTORE_ASSERT_OK(spec.BindContext(context));
  std::string bound_spec_cache_key;
  tensorstore::internal::EncodeCacheKey(&bound_spec_cache_key, spec.driver);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store, kvstore::Open(spec).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_spec,
                                   store.spec(tensorstore::retain_context));
  std::string store_cache_key;
  tensorstore::internal::EncodeCacheKey(&store_cache_key, store.driver);
  EXPECT_EQ(bound_spec_cache_key, store_cache_key);
  new_spec.StripContext();
  EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{false}),
              ::testing::Optional(json_spec));
  {
    // Opening with the same context yields the same cache key.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto store2, kvstore::Open(json_spec, context).result());
    std::string store2_cache_key;
    tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
    EXPECT_EQ(store_cache_key, store2_cache_key);
  }
  {
    // A named alias of the same underlying resource also matches.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto store2,
        kvstore::Open(
            {{"driver", "memory"},
             {"context",
              {{"memory_key_value_store#a", "memory_key_value_store"}}},
             {"memory_key_value_store", "memory_key_value_store#a"}},
            context)
            .result());
    std::string store2_cache_key;
    tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
    EXPECT_EQ(store_cache_key, store2_cache_key);
  }
  {
    // A fresh default context must produce a distinct cache key.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
                                     kvstore::Open(json_spec).result());
    std::string store3_cache_key;
    tensorstore::internal::EncodeCacheKey(&store3_cache_key, store3.driver);
    EXPECT_NE(store_cache_key, store3_cache_key);
  }
}
// Driver instances are cached per context: opening twice from the same
// context returns the identical driver pointer.
TEST(MemoryKeyValueStoreTest, OpenCache) {
  auto context = tensorstore::Context::Default();
  ::nlohmann::json json_spec{{"driver", "memory"}};
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
                                   kvstore::Open(json_spec, context).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
                                   kvstore::Open(json_spec, context).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
                                   kvstore::Open(json_spec).result());
  EXPECT_EQ(store1.driver.get(), store2.driver.get());
  EXPECT_NE(store1.driver.get(), store3.driver.get());
  std::string cache_key1, cache_key3;
  tensorstore::internal::EncodeCacheKey(&cache_key1, store1.driver);
  tensorstore::internal::EncodeCacheKey(&cache_key3, store3.driver);
  EXPECT_NE(cache_key1, cache_key3);
}
// Exercises Spec::Set with contexts: a spec already bound to context1 keeps
// that binding when another context is applied, unless explicitly stripped.
TEST(MemoryKeyValueStoreTest, ContextBinding) {
  auto context1 = Context::Default();
  auto context2 = Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto base_spec, kvstore::Spec::FromJson({{"driver", "memory"}}));
  auto base_spec1 = base_spec;
  TENSORSTORE_ASSERT_OK(base_spec1.Set(context1));
  EXPECT_THAT(
      base_spec1.ToJson(),
      ::testing::Optional(MatchesJson(
          {{"driver", "memory"},
           {"context",
            {{"memory_key_value_store", ::nlohmann::json::object_t()}}}})));
  auto base_spec2 = base_spec;
  TENSORSTORE_ASSERT_OK(base_spec2.Set(context2));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
                                   kvstore::Open(base_spec, context1).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
                                   kvstore::Open(base_spec, context2).result());
  ASSERT_NE(store1.driver, store2.driver);
  EXPECT_THAT(kvstore::Open(base_spec1).result(), ::testing::Optional(store1));
  EXPECT_THAT(kvstore::Open(base_spec2).result(), ::testing::Optional(store2));
  auto base_spec3 = base_spec1;
  // Setting a different context on an already-bound spec is a no-op ...
  TENSORSTORE_ASSERT_OK(base_spec3.Set(context2));
  EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store1));
  // ... unless strip_context removes the existing binding first.
  TENSORSTORE_ASSERT_OK(base_spec3.Set(tensorstore::strip_context, context2));
  EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store2));
}
// Spec survives binary serialization unchanged.
TEST(MemoryKeyValueStoreTest, SpecSerialization) {
  ::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
                                   kvstore::Spec::FromJson(json_spec));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
                                   SerializationRoundTrip(spec));
  EXPECT_THAT(spec_roundtripped.ToJson(),
              ::testing::Optional(MatchesJson(json_spec)));
}
// An opened KvStore survives serialization; its spec matches the original.
TEST(MemoryKeyValueStoreTest, KvStoreSerialization) {
  ::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
                                   kvstore::Open(json_spec).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_roundtripped,
                                   SerializationRoundTrip(store));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
                                   store_roundtripped.spec());
  EXPECT_THAT(spec_roundtripped.ToJson(),
              ::testing::Optional(MatchesJson(json_spec)));
}
// JSON spec <-> "memory://" URL roundtrips, including percent-encoding of
// spaces in the path.
//
// NOTE: the URL string literals in this block were truncated at "//"
// (comment-stripping artifact); reconstructed from ParseMemoryUrl, which
// percent-decodes the authority+path component.
TEST(MemoryKeyValueStoreTest, UrlRoundtrip) {
  tensorstore::internal::TestKeyValueStoreUrlRoundtrip({{"driver", "memory"}},
                                                       "memory://");
  tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
      {{"driver", "memory"}, {"path", "abc/"}}, "memory://abc/");
  tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
      {{"driver", "memory"}, {"path", "abc def/"}}, "memory://abc%20def/");
}
// URLs carrying a query string or fragment must be rejected, matching the
// two error messages produced by ParseMemoryUrl.
//
// NOTE: the URL literals were truncated at "//" (comment-stripping
// artifact); reconstructed so each triggers the corresponding check.
TEST(MemoryKeyValueStoreTest, InvalidUri) {
  EXPECT_THAT(kvstore::Spec::FromUrl("memory://abc?query"),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            ".*: Query string not supported"));
  EXPECT_THAT(kvstore::Spec::FromUrl("memory://abc#fragment"),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            ".*: Fragment identifier not supported"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/memory/memory_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/memory/memory_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b2be1087-b57b-4417-9ce3-1d4c196ea137 | cpp | google/tensorstore | file_key_value_store | tensorstore/kvstore/file/file_key_value_store.cc | tensorstore/kvstore/file/file_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/function_ref.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/file_io_concurrency_resource.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/os/error_code.h"
#include "tensorstore/internal/os/unique_handle.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/file/file_resource.h"
#include "tensorstore/kvstore/file/util.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/os/file_lister.h"
#include "tensorstore/internal/os/file_util.h"
using ::tensorstore::internal::OsErrorCode;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
using ::tensorstore::internal_os::FileDescriptor;
using ::tensorstore::internal_os::FileInfo;
using ::tensorstore::internal_os::kLockSuffix;
using ::tensorstore::internal_os::UniqueFileDescriptor;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::kvstore::SupportedFeatures;
namespace tensorstore {
namespace internal_file_kvstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Metrics for the "file" driver: the common kvstore counters plus two
// driver-specific ones (file opens for read, and write-lock contention).
struct FileMetrics : public internal_kvstore::CommonMetrics {
  internal_metrics::Counter<int64_t>& open_read;
  internal_metrics::Counter<int64_t>& lock_contention;
  // no additional members
};
// Process-wide metric instances, initialized once at static-init time via
// an immediately-invoked lambda (FileMetrics holds references, so it must
// be built in a single expression).
// NOTE(review): the leading space in " kvstore::Write lock contention"
// looks unintended — confirm against the metrics registry conventions.
auto file_metrics = []() -> FileMetrics {
  return {TENSORSTORE_KVSTORE_COMMON_METRICS(file),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(
              file, open_read, "Number of times a file is opened for reading"),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(file, lock_contention,
                                           " kvstore::Write lock contention")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag file_logging("file");
// Returns whether `path` is acceptable as the base path of a "file"
// kvstore.  The empty path and the bare root "/" are always valid; a single
// trailing separator (either '/' or '\\') is tolerated and ignored, and the
// remainder must pass the shared key validation (which also excludes the
// lock-file suffix).
bool IsFileKvstorePathValid(std::string_view path) {
  if (path.empty() || path == "/") return true;
  const char last_char = path.back();
  if (last_char == '/' || last_char == '\\') {
    path.remove_suffix(1);
  }
  return IsKeyValid(path, kLockSuffix);
}
// JSON-bindable spec data for the "file" driver: the shared file-I/O
// concurrency pool and the durability (fsync) option.
struct FileKeyValueStoreSpecData {
  Context::Resource<internal::FileIoConcurrencyResource> file_io_concurrency;
  Context::Resource<FileIoSyncResource> file_io_sync;
  // Enables serialization / cache-key derivation over both members.
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.file_io_concurrency, x.file_io_sync);
  };
  constexpr static auto default_json_binder = jb::Object(
      jb::Member(
          internal::FileIoConcurrencyResource::id,
          jb::Projection<&FileKeyValueStoreSpecData::file_io_concurrency>()),
      jb::Member(FileIoSyncResource::id,
                 jb::Projection<&FileKeyValueStoreSpecData::file_io_sync>())
      );
};
// Registered driver spec for the "file" kvstore.
class FileKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<FileKeyValueStoreSpec,
                                                    FileKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "file";

  // Validates the configured base path and lexically normalizes it
  // (collapsing "."/".." components and duplicate separators).
  absl::Status NormalizeSpec(std::string& path) override {
    if (!IsFileKvstorePathValid(path)) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid file path: ", QuoteString(path)));
    }
    path = internal::LexicalNormalizePath(path);
    return absl::OkStatus();
  }

  Future<kvstore::DriverPtr> DoOpen() const override;

  // Converts the spec to a "file://<path>" URL.
  // NOTE: the original literal here was truncated at "//"
  // (comment-stripping artifact); reconstructed to the conventional
  // scheme://percent-encoded-path form used by the URL registry.
  Result<std::string> ToUrl(std::string_view path) const override {
    return absl::StrCat(id, "://", internal::PercentEncodeUriPath(path));
  }
};
// The "file" kvstore driver: each key maps to a regular file under the
// spec's base path.  Writes are made atomic via a lock file plus rename
// (see WriteTask/DeleteTask below).
class FileKeyValueStore
    : public internal_kvstore::RegisteredDriver<FileKeyValueStore,
                                                FileKeyValueStoreSpec> {
 public:
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  Future<const void> DeleteRange(KeyRange range) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;
  // All filesystem work is dispatched on this bounded-concurrency executor.
  const Executor& executor() { return spec_.file_io_concurrency->executor; }
  std::string DescribeKey(std::string_view key) override {
    return absl::StrCat("local file ", QuoteString(key));
  }
  absl::Status GetBoundSpecData(FileKeyValueStoreSpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }
  // The lock-file protocol provides atomic RMW and conditional writes.
  SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
           SupportedFeatures::kAtomicWriteWithoutOverwrite;
  }
  // Whether writes fsync file and parent directory for durability.
  bool sync() const { return *spec_.file_io_sync; }
  SpecData spec_;
};
// Rejects keys that are unusable as file paths (or that collide with the
// driver's lock-file suffix), returning InvalidArgument with the quoted key.
absl::Status ValidateKey(std::string_view key) {
  return IsKeyValid(key, kLockSuffix)
             ? absl::OkStatus()
             : absl::InvalidArgumentError(
                   absl::StrCat("Invalid key: ", QuoteString(key)));
}
// Validates a key range by validating its longest directory-like prefix;
// a range with no common directory prefix is unconstrained and always ok.
absl::Status ValidateKeyRange(const KeyRange& range) {
  auto dir_prefix = LongestDirectoryPrefix(range);
  if (dir_prefix.empty()) {
    return absl::OkStatus();
  }
  return ValidateKey(dir_prefix);
}
// Derives a storage generation from a file's identity and mtime
// (device id, inode/file id, modification time in Unix nanos), so any
// replacement or rewrite of the file yields a different generation.
StorageGeneration GetFileGeneration(const FileInfo& info) {
  return StorageGeneration::FromValues(
      internal_os::GetDeviceId(info), internal_os::GetFileId(info),
      absl::ToUnixNanos(internal_os::GetMTime(info)));
}
// Opens (creating as needed) the parent directory of `path`, returning a
// descriptor for it.
//
// Phase 1 walks *up* the path: each iteration splices a NUL over the last
// separator (making `path.c_str()` name a shorter prefix) until an existing
// ancestor directory opens successfully.  Phase 2 walks back *down*,
// creating each missing component and restoring the separators that phase 1
// replaced; the final descriptor is the immediate parent of `path`.
Result<UniqueFileDescriptor> OpenParentDirectory(std::string path) {
  size_t end_pos = path.size();
  Result<UniqueFileDescriptor> fd;
  while (true) {
    // Scan backwards to the separator preceding the last component.
    size_t separator_pos = end_pos;
    while (separator_pos != 0 &&
           !internal_os::IsDirSeparator(path[separator_pos - 1])) {
      --separator_pos;
    }
    // Move onto the separator itself; wraps to npos when there is no
    // separator at all (relative single-component path).
    --separator_pos;
    const char* dir_path;
    if (separator_pos == std::string::npos) {
      dir_path = ".";
    } else if (separator_pos == 0) {
      dir_path = "/";
    } else {
      // Truncate in place: c_str() now ends at the separator.
      path[separator_pos] = '\0';
      dir_path = path.c_str();
      end_pos = separator_pos;
    }
    fd = internal_os::OpenDirectoryDescriptor(dir_path);
    if (!fd.ok()) {
      if (absl::IsNotFound(fd.status())) {
        assert(separator_pos != 0 && separator_pos != std::string::npos);
        // Ancestor missing: retry one level higher.
        end_pos = separator_pos - 1;
        continue;
      }
      return fd.status();
    }
    // Restore the separator we clobbered for this successful open.
    if (dir_path == path.c_str()) path[separator_pos] = '/';
    break;
  }
  // Phase 2: create each missing component below the deepest existing
  // ancestor, restoring one spliced NUL per iteration.
  while (true) {
    size_t separator_pos = path.find('\0', end_pos);
    if (separator_pos == std::string::npos) {
      // All separators restored: `fd` is the parent of `path`.
      return fd;
    }
    TENSORSTORE_RETURN_IF_ERROR(internal_os::MakeDirectory(path));
    fd = internal_os::OpenDirectoryDescriptor(path);
    TENSORSTORE_RETURN_IF_ERROR(fd.status());
    path[separator_pos] = '/';
    end_pos = separator_pos + 1;
  }
}
// Opens the value file at `path` for reading.
//
// On success sets `*generation` (and `*size` if requested) from the file's
// metadata.  A missing file is NOT an error: returns an invalid descriptor
// with `*generation = NoValue()`.  Fails if the path names a non-regular
// file.
Result<UniqueFileDescriptor> OpenValueFile(const std::string& path,
                                           StorageGeneration* generation,
                                           int64_t* size = nullptr) {
  auto fd = internal_os::OpenExistingFileForReading(path);
  if (!fd.ok()) {
    if (absl::IsNotFound(fd.status())) {
      *generation = StorageGeneration::NoValue();
      return UniqueFileDescriptor{};
    }
    return fd;
  }
  FileInfo info;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd->get(), &info));
  if (!internal_os::IsRegularFile(info)) {
    return absl::FailedPreconditionError(
        absl::StrCat("Not a regular file: ", QuoteString(path)));
  }
  if (size) *size = internal_os::GetSize(info);
  *generation = GetFileGeneration(info);
  return fd;
}
// Reads exactly `byte_range` from `fd` via positional reads into a flat
// cord, looping over short reads.  A zero-byte read before the range is
// satisfied means the file shrank concurrently -> Unavailable.  Updates the
// batch-read / bytes-read / latency metrics.
Result<absl::Cord> ReadFromFileDescriptor(FileDescriptor fd,
                                          ByteRange byte_range) {
  file_metrics.batch_read.Increment();
  absl::Time start_time = absl::Now();
  // `false`: do not zero-initialize; every byte is overwritten below.
  internal::FlatCordBuilder buffer(byte_range.size(), false);
  size_t offset = 0;
  while (offset < buffer.size()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto n, internal_os::ReadFromFile(fd, buffer.data() + offset,
                                          buffer.size() - offset,
                                          byte_range.inclusive_min + offset));
    if (n > 0) {
      file_metrics.bytes_read.IncrementBy(n);
      offset += n;
      buffer.set_inuse(offset);
      continue;
    }
    if (n == 0) {
      // EOF before the requested range was fully read.
      return absl::UnavailableError("Length changed while reading");
    }
  }
  file_metrics.read_latency_ms.Observe(
      absl::ToInt64Milliseconds(absl::Now() - start_time));
  return std::move(buffer).Build();
}
class BatchReadTask;
// Batch entry keyed by (driver, key); each request carries its own
// generation conditions and byte range.
using BatchReadTaskBase = internal_kvstore_batch::BatchReadEntry<
    FileKeyValueStore,
    internal_kvstore_batch::ReadRequest<kvstore::ReadGenerationConditions>,
    std::string >;
// Services all batched reads of a single key: opens the file once, then
// either performs one ranged read or coalesces nearby ranges and reads
// them on the driver's executor.
class BatchReadTask final
    : public BatchReadTaskBase,
      public internal::AtomicReferenceCount<BatchReadTask> {
 private:
  // State filled by ProcessBatch() and shared by all coalesced reads.
  TimestampedStorageGeneration stamp_;
  UniqueFileDescriptor fd_;
  int64_t size_;
  public:
  BatchReadTask(BatchEntryKey&& batch_entry_key_)
      : BatchReadTaskBase(std::move(batch_entry_key_)),
        // Starts with one reference, adopted in Submit().
        internal::AtomicReferenceCount<BatchReadTask>(1) {
  }
  void Submit(Batch::View batch) final {
    if (request_batch.requests.empty()) return;
    driver().executor()(
        [self = internal::IntrusivePtr<BatchReadTask>(
             this, internal::adopt_object_ref)] { self->ProcessBatch(); });
  }
  // Performs one positional read and wraps it in a ReadResult with the
  // stamp captured at open time.
  Result<kvstore::ReadResult> DoByteRangeRead(ByteRange byte_range) {
    absl::Cord value;
    TENSORSTORE_ASSIGN_OR_RETURN(
        value, ReadFromFileDescriptor(fd_.get(), byte_range),
        tensorstore::MaybeAnnotateStatus(_, "Error reading from open file"));
    return kvstore::ReadResult::Value(std::move(value), stamp_);
  }
  void ProcessBatch() {
    ABSL_LOG_IF(INFO, file_logging)
        << "BatchReadTask " << std::get<std::string>(batch_entry_key);
    stamp_.time = absl::Now();
    file_metrics.open_read.Increment();
    auto& requests = request_batch.requests;
    // One open serves every request; failure fails them all.
    TENSORSTORE_ASSIGN_OR_RETURN(
        fd_,
        OpenValueFile(std::get<std::string>(batch_entry_key),
                      &stamp_.generation, &size_),
        internal_kvstore_batch::SetCommonResult(requests, std::move(_)));
    if (!fd_.valid()) {
      internal_kvstore_batch::SetCommonResult(
          requests, kvstore::ReadResult::Missing(stamp_.time));
      return;
    }
    // Resolves generation-condition mismatches / invalid ranges in place,
    // leaving only requests that still need actual reads.
    internal_kvstore_batch::ValidateGenerationsAndByteRanges(requests, stamp_,
                                                             size_);
    if (requests.empty()) return;
    if (requests.size() == 1) {
      // Fast path: no coalescing needed.
      auto& byte_range_request =
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(requests[0]);
      byte_range_request.promise.SetResult(
          DoByteRangeRead(byte_range_request.byte_range.AsByteRange()));
      return;
    }
    const auto& executor = driver().executor();
    internal_kvstore_batch::CoalescingOptions coalescing_options;
    // Merge ranges separated by at most 255 bytes into one read.
    coalescing_options.max_extra_read_bytes = 255;
    internal_kvstore_batch::ForEachCoalescedRequest<Request>(
        requests, coalescing_options,
        [&](ByteRange coalesced_byte_range,
            tensorstore::span<Request> coalesced_requests) {
          auto self = internal::IntrusivePtr<BatchReadTask>(this);
          executor([self = std::move(self), coalesced_byte_range,
                    coalesced_requests] {
            self->ProcessCoalescedRead(coalesced_byte_range,
                                       coalesced_requests);
          });
        });
  }
  // Reads one coalesced range and slices it back out to each request.
  void ProcessCoalescedRead(ByteRange coalesced_byte_range,
                            tensorstore::span<Request> coalesced_requests) {
    TENSORSTORE_ASSIGN_OR_RETURN(auto read_result,
                                 DoByteRangeRead(coalesced_byte_range),
                                 internal_kvstore_batch::SetCommonResult(
                                     coalesced_requests, std::move(_)));
    internal_kvstore_batch::ResolveCoalescedRequests(
        coalesced_byte_range, coalesced_requests, std::move(read_result));
  }
};
// Validates the key and enqueues the read on the per-key BatchReadTask
// (created on demand for the current batch / staleness bound).
Future<ReadResult> FileKeyValueStore::Read(Key key, ReadOptions options) {
  file_metrics.read.Increment();
  TENSORSTORE_RETURN_IF_ERROR(ValidateKey(key));
  auto [promise, future] = PromiseFuturePair<kvstore::ReadResult>::Make();
  BatchReadTask::MakeRequest<BatchReadTask>(
      *this, {std::move(key)}, options.batch, options.staleness_bound,
      BatchReadTask::Request{{std::move(promise), options.byte_range},
                             std::move(options.generation_conditions)});
  return std::move(future);
}
// Writes `value` to the already-open temporary file `fd` (at `fd_path`),
// optionally fsyncs it, then atomically renames it over `rename_path`.
// Returns the generation of the renamed file, computed from metadata
// captured *before* the rename (the rename preserves identity and mtime).
Result<StorageGeneration> WriteWithSyncAndRename(
    FileDescriptor fd, const std::string& fd_path, absl::Cord value, bool sync,
    const std::string& rename_path) {
  auto start_write = absl::Now();
  // Loop over partial writes until the whole cord is flushed.
  while (!value.empty()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto n, internal_os::WriteCordToFile(fd, value),
        MaybeAnnotateStatus(
            _, absl::StrCat("Failed writing: ", QuoteString(fd_path))));
    file_metrics.bytes_written.IncrementBy(n);
    if (n == value.size()) break;
    value.RemovePrefix(n);
  }
  if (sync) {
    TENSORSTORE_RETURN_IF_ERROR(internal_os::FsyncFile(fd));
  }
  // Capture identity/mtime before the rename; used as the new generation.
  FileInfo info;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd, &info));
  TENSORSTORE_RETURN_IF_ERROR(
      internal_os::RenameOpenFile(fd, fd_path, rename_path));
#if 0
  // Debug check that the generation is stable across the rename.
  FileInfo debug_info;
  ABSL_CHECK_OK(internal_os::GetFileInfo(fd, &debug_info));
  ABSL_CHECK_EQ(GetFileGeneration(info), GetFileGeneration(debug_info));
#endif
  file_metrics.write_latency_ms.Observe(
      absl::ToInt64Milliseconds(absl::Now() - start_write));
  return GetFileGeneration(info);
}
// Opens (creating if absent) the lock file at `path` for writing and fills
// `*info` with its metadata; rejects non-regular files.
Result<UniqueFileDescriptor> OpenLockFile(const std::string& path,
                                          FileInfo* info) {
  auto fd = internal_os::OpenFileForWriting(path);
  if (!fd.ok()) return fd;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd->get(), info));
  if (!internal_os::IsRegularFile(*info)) {
    return absl::FailedPreconditionError(
        absl::StrCat("Not a regular file: ", path));
  }
  return fd;
}
// RAII helper managing the per-key lock file ("<key>" + kLockSuffix) that
// serializes concurrent writers of the same key.
struct WriteLockHelper {
  std::string lock_path;               // path of the lock file
  UniqueFileDescriptor lock_fd;        // open descriptor on the lock file
  std::optional<internal_os::UnlockFn> unlock_fn;  // set while lock is held
  WriteLockHelper(const std::string& path)
      : lock_path(absl::StrCat(path, kLockSuffix)) {}
  ~WriteLockHelper() { Unlock(); }
  // Creates the lock file (if needed) and acquires an fd lock on it.
  //
  // After acquiring, re-opens the path and compares (device, inode) with
  // the locked descriptor: if another writer deleted/recreated the lock
  // file in between, the lock we hold guards a stale file, so retry with
  // the freshly opened one.  `info`/`other_info` ping-pong between `a` and
  // `b`, so comparing `a` with `b` always compares old vs. new.
  absl::Status CreateAndAcquire() {
    FileInfo a, b;
    FileInfo* info = &a;
    TENSORSTORE_ASSIGN_OR_RETURN(lock_fd, OpenLockFile(lock_path, info));
    while (true) {
      TENSORSTORE_ASSIGN_OR_RETURN(
          unlock_fn, internal_os::AcquireFdLock(lock_fd.get()),
          MaybeAnnotateStatus(_,
                              absl::StrCat("Failed to acquire lock on file: ",
                                           QuoteString(lock_path))));
      FileInfo* other_info = info == &a ? &b : &a;
      TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor other_fd,
                                   OpenLockFile(lock_path, other_info));
      if (internal_os::GetDeviceId(a) == internal_os::GetDeviceId(b) &&
          internal_os::GetFileId(a) == internal_os::GetFileId(b)) {
        // Same file before and after locking: lock is valid.
        return absl::OkStatus();
      }
      // Lock file was replaced concurrently; retry on the new file.
      Unlock();
      info = other_info;
      lock_fd = std::move(other_fd);
      file_metrics.lock_contention.Increment();
    }
  }
  // Deletes the lock file; NotFound is tolerated (already cleaned up).
  absl::Status Delete() {
    auto status = internal_os::DeleteOpenFile(lock_fd.get(), lock_path);
    if (status.ok() || absl::IsNotFound(status)) {
      return absl::OkStatus();
    }
    return MaybeAnnotateStatus(std::move(status), "Failed to clean lock file");
  }
  // Releases the fd lock if held; idempotent.
  void Unlock() {
    if (unlock_fn) {
      std::move (*unlock_fn)(lock_fd.get());
      unlock_fn = std::nullopt;
    }
  }
};
// Executor task implementing a (possibly conditional) write of one key.
//
// Protocol: ensure the parent directory exists, acquire the key's lock
// file, check the if_equal generation condition, write the value into the
// lock file itself, then rename the lock file over the destination — making
// the lock file *become* the value file, so it must not be deleted on the
// success path.
struct WriteTask {
  std::string full_path;
  absl::Cord value;
  kvstore::WriteOptions options;
  bool sync;  // fsync file + parent directory for durability
  Result<TimestampedStorageGeneration> operator()() const {
    ABSL_LOG_IF(INFO, file_logging) << "WriteTask " << full_path;
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    TENSORSTORE_ASSIGN_OR_RETURN(auto dir_fd, OpenParentDirectory(full_path));
    WriteLockHelper lock_helper(full_path);
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.CreateAndAcquire());
    // Cleared once the lock file has been renamed into place.
    bool delete_lock_file = true;
    FileDescriptor fd = lock_helper.lock_fd.get();
    const std::string& lock_path = lock_helper.lock_path;
    auto generation_result = [&]() -> Result<StorageGeneration> {
      // Conditional write: verify the current generation under the lock.
      if (!StorageGeneration::IsUnknown(
              options.generation_conditions.if_equal)) {
        StorageGeneration generation;
        TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor value_fd,
                                     OpenValueFile(full_path, &generation));
        if (generation != options.generation_conditions.if_equal) {
          // Mismatch is reported as Unknown(), not an error.
          return StorageGeneration::Unknown();
        }
      }
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto generation,
          WriteWithSyncAndRename(fd, lock_path, value, sync, full_path));
      delete_lock_file = false;
      if (sync) {
        // Make the rename itself durable.
        TENSORSTORE_RETURN_IF_ERROR(
            internal_os::FsyncDirectory(dir_fd.get()),
            MaybeAnnotateStatus(
                _, absl::StrCat("Error calling fsync on parent directory of: ",
                                full_path)));
      }
      lock_helper.Unlock();
      return generation;
    }();
    if (delete_lock_file) {
      lock_helper.Delete().IgnoreError();
    }
    if (!generation_result) {
      return std::move(generation_result).status();
    }
    r.generation = *std::move(generation_result);
    return r;
  }
};
// Executor task implementing a (possibly conditional) delete of one key.
// Acquires the key's lock file, checks the if_equal condition, unlinks the
// value file (missing file is a successful no-op), then always removes the
// lock file.
struct DeleteTask {
  std::string full_path;
  kvstore::WriteOptions options;
  bool sync;  // fsync parent directory after a successful delete
  Result<TimestampedStorageGeneration> operator()() const {
    ABSL_LOG_IF(INFO, file_logging) << "DeleteTask " << full_path;
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    WriteLockHelper lock_helper(full_path);
    TENSORSTORE_ASSIGN_OR_RETURN(auto dir_fd, OpenParentDirectory(full_path));
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.CreateAndAcquire());
    // Only fsync when the delete actually proceeded.
    bool fsync_directory = false;
    auto generation_result = [&]() -> Result<StorageGeneration> {
      if (!StorageGeneration::IsUnknown(
              options.generation_conditions.if_equal)) {
        StorageGeneration generation;
        TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor value_fd,
                                     OpenValueFile(full_path, &generation));
        if (generation != options.generation_conditions.if_equal) {
          // Precondition mismatch reported as Unknown(), not an error.
          return StorageGeneration::Unknown();
        }
      }
      auto status = internal_os::DeleteFile(full_path);
      if (!status.ok() && !absl::IsNotFound(status)) {
        return status;
      }
      fsync_directory = sync;
      return StorageGeneration::NoValue();
    }();
    // Unlike WriteTask, the lock file never becomes the value file here,
    // so it is always cleaned up.
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.Delete());
    if (fsync_directory) {
      TENSORSTORE_RETURN_IF_ERROR(
          internal_os::FsyncDirectory(dir_fd.get()),
          MaybeAnnotateStatus(
              _, absl::StrCat("Error calling fsync on parent directory of: ",
                              QuoteString(full_path))));
    }
    if (!generation_result) {
      return std::move(generation_result).status();
    }
    r.generation = *std::move(generation_result);
    return r;
  }
};
// Dispatches a write (value present) or delete (nullopt) to the
// corresponding task on the file-I/O executor.
Future<TimestampedStorageGeneration> FileKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  file_metrics.write.Increment();
  TENSORSTORE_RETURN_IF_ERROR(ValidateKey(key));
  if (value) {
    return MapFuture(executor(), WriteTask{std::move(key), *std::move(value),
                                           std::move(options), this->sync()});
  } else {
    return MapFuture(executor(), DeleteTask{std::move(key), std::move(options),
                                            this->sync()});
  }
}
// Executor task deleting every key (and fully-contained directory) within
// `range` by recursively walking the range's common directory prefix.
struct DeleteRangeTask {
  KeyRange range;

  void operator()(Promise<void> promise) {
    ABSL_LOG_IF(INFO, file_logging) << "DeleteRangeTask " << range;
    std::string prefix(internal_file_util::LongestDirectoryPrefix(range));
    // Per-entry deletion failures are accumulated here and reported after
    // the traversal completes, so one bad entry does not abort the rest.
    absl::Status delete_status;
    auto status = internal_os::RecursiveFileList(
        prefix,
        [&](std::string_view path) {
          // Only descend into subtrees that can intersect the range.
          return tensorstore::IntersectsPrefix(range, path);
        },
        [&](auto entry) -> absl::Status {
          if (!promise.result_needed()) return absl::CancelledError("");
          bool do_delete = false;
          if (entry.IsDirectory()) {
            // Remove a directory only when its entire subtree lies within
            // the range; otherwise it may still contain retained keys.
            do_delete = tensorstore::ContainsPrefix(range, entry.GetFullPath());
          } else {
            do_delete = tensorstore::Contains(range, entry.GetFullPath());
          }
          if (do_delete) {
            auto s = entry.Delete();
            // NotFound (raced with another deleter) and FailedPrecondition
            // (e.g. non-empty directory) are expected and ignored.
            if (!s.ok() && !absl::IsNotFound(s) &&
                !absl::IsFailedPrecondition(s)) {
              ABSL_LOG_IF(INFO, file_logging) << s;
              delete_status.Update(s);
            }
          }
          return absl::OkStatus();
        });
    if (!status.ok()) {
      // Traversal error takes precedence; return so the accumulated
      // per-entry status is not (redundantly) set afterwards.
      promise.SetResult(MakeResult(std::move(status)));
      return;
    }
    promise.SetResult(MakeResult(std::move(delete_status)));
  }
};
// Deletes all keys in `range`.  An empty range succeeds immediately;
// otherwise the recursive filesystem walk runs on this store's executor.
Future<const void> FileKeyValueStore::DeleteRange(KeyRange range) {
  file_metrics.delete_range.Increment();
  if (range.empty()) return absl::OkStatus();
  // Reject ranges whose bounds contain invalid key components.
  TENSORSTORE_RETURN_IF_ERROR(ValidateKeyRange(range));
  return PromiseFuturePair<void>::Link(
             WithExecutor(executor(), DeleteRangeTask{std::move(range)}))
      .future;
}
// Lists keys in `options.range`, streaming matches to `receiver` via the
// execution receiver protocol (set_starting / set_value* / set_done or
// set_error / set_stopping).
struct ListTask {
  kvstore::ListOptions options;
  ListReceiver receiver;
  void operator()() {
    ABSL_LOG_IF(INFO, file_logging) << "ListTask " << options.range;
    // Set by the cancel callback handed to the receiver; polled (relaxed is
    // sufficient — no data is published through this flag) on each entry.
    std::atomic<bool> cancelled = false;
    execution::set_starting(receiver, [&cancelled] {
      cancelled.store(true, std::memory_order_relaxed);
    });
    // Restrict the walk to the deepest directory common to the range.
    std::string prefix(
        internal_file_util::LongestDirectoryPrefix(options.range));
    auto status = internal_os::RecursiveFileList(
        prefix,
        [&](std::string_view path) {
          return tensorstore::IntersectsPrefix(options.range, path);
        },
        [&](auto entry) -> absl::Status {
          if (cancelled.load(std::memory_order_relaxed)) {
            return absl::CancelledError("");
          }
          // Only regular files correspond to keys.
          if (entry.IsDirectory()) return absl::OkStatus();
          std::string_view path = entry.GetFullPath();
          // Lock files are an implementation detail and never listed.
          if (tensorstore::Contains(options.range, path) &&
              !absl::EndsWith(path, kLockSuffix)) {
            path.remove_prefix(options.strip_prefix_length);
            execution::set_value(receiver,
                                 ListEntry{std::string(path), entry.GetSize()});
          }
          return absl::OkStatus();
        });
    // A cancellation-induced error is deliberately not reported as an error.
    if (!status.ok() && !cancelled.load(std::memory_order_relaxed)) {
      execution::set_error(receiver, std::move(status));
      execution::set_stopping(receiver);
      return;
    }
    execution::set_done(receiver);
    execution::set_stopping(receiver);
  }
};
// Starts a list operation.  Degenerate cases (empty or invalid range) are
// completed inline on the calling thread; otherwise a ListTask is queued on
// the executor.  In every path the receiver protocol is honored
// (set_starting ... set_stopping).
void FileKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
  file_metrics.list.Increment();
  if (options.range.empty()) {
    // Nothing can match: complete successfully without touching the disk.
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  if (auto error = ValidateKeyRange(options.range); !error.ok()) {
    execution::set_starting(receiver, [] {});
    execution::set_error(receiver, std::move(error));
    execution::set_stopping(receiver);
    return;
  }
  executor()(ListTask{std::move(options), std::move(receiver)});
}
// Opens a driver for this spec.  Opening is trivial (no filesystem access):
// the driver just captures a copy of the spec data, so a ready future is
// returned.
Future<kvstore::DriverPtr> FileKeyValueStoreSpec::DoOpen() const {
  auto driver_ptr = internal::MakeIntrusivePtr<FileKeyValueStore>();
  driver_ptr->spec_ = data_;
  return driver_ptr;
}
// Parses a "file://" URL into a kvstore Spec.
//
// Query strings and fragments are rejected; the authority+path portion is
// percent-decoded and becomes the filesystem path.  The returned spec uses
// the default context resources for concurrency and sync behavior.
Result<kvstore::Spec> ParseFileUrl(std::string_view url) {
  auto parsed = internal::ParseGenericUri(url);
  // The URL-scheme registry only routes matching schemes here.
  assert(parsed.scheme == internal_file_kvstore::FileKeyValueStoreSpec::id);
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  std::string path = internal::PercentDecode(parsed.authority_and_path);
  auto driver_spec = internal::MakeIntrusivePtr<FileKeyValueStoreSpec>();
  driver_spec->data_.file_io_concurrency =
      Context::Resource<internal::FileIoConcurrencyResource>::DefaultSpec();
  driver_spec->data_.file_io_sync =
      Context::Resource<FileIoSyncResource>::DefaultSpec();
  return {std::in_place, std::move(driver_spec), std::move(path)};
}
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::internal_file_kvstore::FileKeyValueStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::internal_file_kvstore::FileKeyValueStoreSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{
tensorstore::internal_file_kvstore::FileKeyValueStoreSpec::id,
tensorstore::internal_file_kvstore::ParseFileUrl};
} | #include <errno.h>
#include <stddef.h>
#include <cstring>
#include <fstream>
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/os/filesystem.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
#ifndef _WIN32
#include <sys/stat.h>
#include <unistd.h>
#endif
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::KeyRange;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal_os::GetDirectoryContents;
using ::tensorstore::internal_testing::ScopedCurrentWorkingDirectory;
using ::tensorstore::internal_testing::ScopedTemporaryDirectory;
using ::testing::HasSubstr;
KvStore GetStore(std::string root) {
return kvstore::Open({{"driver", "file"}, {"path", root + "/"}}).value();
}
TEST(FileKeyValueStoreTest, Basic) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(FileKeyValueStoreTest, InvalidKey) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
EXPECT_THAT(kvstore::Read(store, "this_is_a_long_key").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(
kvstore::Read(store, "").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, std::string("\0", 1)).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Write(store, "", {}).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "/").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, ".").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "..").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/./b").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/../b").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a.__lock").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/b.__lock/c").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
}
TEST(FileKeyValueStoreTest, LockFiles) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "a/foo", absl::Cord("xyz"),
{StorageGeneration::NoValue()})
.result());
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo"));
EXPECT_THAT(
kvstore::Write(store, "a/foo", absl::Cord("qqq"),
{StorageGeneration::NoValue()})
.result(),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()));
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo"));
{ std::ofstream x(root + "/a/foo.__lock"); }
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo", "a/foo.__lock"));
EXPECT_THAT(
ListFuture(store).result(),
IsOkAndHolds(::testing::UnorderedElementsAre(MatchesListEntry("a/foo"))));
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "a/foo", absl::Cord("xyz")).result());
{ std::ofstream x(root + "/a/foo.__lock"); }
TENSORSTORE_EXPECT_OK(kvstore::DeleteRange(store, KeyRange::Prefix("a/")));
EXPECT_THAT(GetDirectoryContents(root), ::testing::UnorderedElementsAre("a"));
}
TEST(FileKeyValueStoreTest, NestedDirectories) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/foo", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(
kvstore::Write(store, "a/ba/ccc/dddd", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(
kvstore::Write(store, "a/ba/ccc/foo", absl::Cord("xyz")));
EXPECT_THAT(
kvstore::Write(store, "a/ba/ccc", absl::Cord("xyz")).result(),
::testing::AnyOf(MatchesStatus(absl::StatusCode::kPermissionDenied),
MatchesStatus(absl::StatusCode::kFailedPrecondition)));
}
TEST(FileKeyValueStoreTest, ConcurrentWrites) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::TestConcurrentWritesOptions options;
options.get_store = [&] { return GetStore(root); };
tensorstore::internal::TestConcurrentWrites(options);
}
#ifndef _WIN32
TEST(FileKeyValueStoreTest, Permissions) {
if (::geteuid() == 0) {
return;
}
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "foo", absl::Cord("xyz")).result());
ASSERT_EQ(0, ::chmod(root.c_str(), 0500))
<< "Error " << errno << ": " << ::strerror(errno);
struct RestoreWritePermission {
std::string path;
~RestoreWritePermission() {
EXPECT_EQ(0, ::chmod(path.c_str(), 0700))
<< "Error " << errno << ": " << ::strerror(errno);
}
};
RestoreWritePermission restore{root};
EXPECT_EQ("xyz", kvstore::Read(store, "foo").value().value);
EXPECT_THAT(kvstore::Write(store, "foo", absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
EXPECT_EQ("xyz", kvstore::Read(store, "foo").value().value);
EXPECT_THAT(kvstore::Write(store, "bar", absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
EXPECT_THAT(kvstore::Read(store, "bar").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(kvstore::Delete(store, "foo").result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
ASSERT_EQ(0, ::chmod((root + "/foo").c_str(), 0))
<< "Error " << errno << ": " << ::strerror(errno);
EXPECT_THAT(kvstore::Read(store, "foo").result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
}
#endif
TEST(FileKeyValueStoreTest, DeletePrefix) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(FileKeyValueStoreTest, DeleteRange) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(FileKeyValueStoreTest, DeleteRangeToEnd) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(FileKeyValueStoreTest, DeleteRangeFromBeginning) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
#if 0
TEST(FileKeyValueStoreTest, CopyRange) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreCopyRange(store);
}
#endif
TEST(FileKeyValueStoreTest, ListErrors) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {KeyRange::Prefix("a
CompletionNotifyingReceiver{¬ification,
tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log,
::testing::ElementsAre(
"set_starting",
HasSubstr("set_error: INVALID_ARGUMENT: Invalid key: "),
"set_stopping"));
}
}
TEST(FileKeyValueStoreTest, List) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreList(store, false);
}
TEST(FileKeyValueStoreTest, SpecRoundtrip) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {{"driver", "file"}, {"path", root}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(FileKeyValueStoreTest, SpecRoundtripSync) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {
{"driver", "file"},
{"path", root},
{"file_io_sync", false},
{"context",
{
{"file_io_concurrency", ::nlohmann::json::object_t()},
}},
};
options.spec_request_options.Set(tensorstore::retain_context).IgnoreError();
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(FileKeyValueStoreTest, InvalidSpec) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "file"}, {"path", root}, {"extra", "key"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", "file"}, {"path", 5}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Open({{"driver", "file"}, {"path", "/a/../b/"}}, context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid file path.*"));
}
TEST(FileKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip({{"driver", "file"}},
"file:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "file"}, {"path", "/abc/"}}, "file:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "file"}, {"path", "/abc def/"}}, "file:
}
TEST(FileKeyValueStoreTest, UrlOpen) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open("file:
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(FileKeyValueStoreTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("file:
EXPECT_THAT(kvstore::Spec::FromUrl("file:
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid file path.*"));
}
TEST(FileKeyValueStoreTest, RelativePath) {
ScopedTemporaryDirectory tempdir;
ScopedCurrentWorkingDirectory scoped_cwd(tempdir.path());
auto store = GetStore("tmp/dataset");
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "abc", {}).result());
}
TEST(FileKeyValueStoreTest, BatchRead) {
ScopedTemporaryDirectory tempdir;
auto store = GetStore(tempdir.path());
tensorstore::internal::BatchReadGenericCoalescingTestOptions options;
options.coalescing_options.max_extra_read_bytes = 255;
options.metric_prefix = "/tensorstore/kvstore/file/";
options.has_file_open_metric = true;
tensorstore::internal::TestBatchReadGenericCoalescing(store, options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/file_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/file_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
850ceba7-2580-4f7f-8fe5-1dbc2dc1582c | cpp | google/tensorstore | util | tensorstore/kvstore/file/util.cc | tensorstore/kvstore/file/util_test.cc | #include "tensorstore/kvstore/file/util.h"
#include <stddef.h>
#include <string_view>
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
// Returns whether `key` names a valid file path for this kvstore.
//
// A key is rejected if it contains a NUL byte, is empty, or ends with a
// path separator ('/' or '\\').  A single leading separator is tolerated
// and ignored.  Every separator-delimited component must be non-empty,
// must not be "." or "..", and (when `lock_suffix` is non-empty) must not
// end with `lock_suffix`, so keys can never collide with lock files.
bool IsKeyValid(std::string_view key, std::string_view lock_suffix) {
  if (key.find('\0') != std::string_view::npos) return false;
  if (key.empty()) return false;
  // A trailing separator would name a directory, not a file.
  const char last = key.back();
  if (last == '/' || last == '\\') return false;
  // Tolerate one leading separator (absolute path).
  const char first = key.front();
  if (first == '/' || first == '\\') key.remove_prefix(1);
  // Validate each separator-delimited component.
  size_t start = 0;
  for (;;) {
    const size_t sep = key.find_first_of("/\\", start);
    const std::string_view component = key.substr(start, sep - start);
    if (component.empty()) return false;  // adjacent separators / empty key
    if (component == "." || component == "..") return false;
    if (!lock_suffix.empty() && component.size() >= lock_suffix.size() &&
        component.substr(component.size() - lock_suffix.size()) ==
            lock_suffix) {
      return false;
    }
    if (sep == std::string_view::npos) break;
    start = sep + 1;
  }
  return true;
}
// Returns the longest directory path (no trailing '/') common to every key
// in `range`, or an empty view when the range's longest common prefix
// contains no '/' at all.
std::string_view LongestDirectoryPrefix(const KeyRange& range) {
  std::string_view prefix = tensorstore::LongestPrefix(range);
  // Truncate at the final separator so a partial filename component is not
  // mistaken for a directory.
  if (const size_t slash = prefix.rfind('/');
      slash != std::string_view::npos) {
    return prefix.substr(0, slash);
  }
  return {};
}
}
} | #include "tensorstore/kvstore/file/util.h"
#include <string_view>
#include <gtest/gtest.h>
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
TEST(IsKeyValid, Basic) {
EXPECT_TRUE(IsKeyValid("tmp/root", ""));
EXPECT_TRUE(IsKeyValid("a", ""));
EXPECT_TRUE(IsKeyValid("a/b", ""));
EXPECT_TRUE(IsKeyValid("/tmp/root", ""));
EXPECT_TRUE(IsKeyValid("a\\b", ""));
EXPECT_FALSE(IsKeyValid("", ""));
EXPECT_FALSE(IsKeyValid("/", ""));
EXPECT_FALSE(IsKeyValid("
EXPECT_FALSE(IsKeyValid("
EXPECT_FALSE(IsKeyValid("/tmp/root/", ""));
EXPECT_FALSE(IsKeyValid("tmp
EXPECT_FALSE(IsKeyValid("tmp/./root", ""));
EXPECT_FALSE(IsKeyValid("tmp/../root", ""));
EXPECT_FALSE(IsKeyValid("tmp/root/", ""));
EXPECT_FALSE(IsKeyValid("tmp/.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid("tmp/foo.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid("\\", ""));
EXPECT_FALSE(IsKeyValid("tmp\\..\\root", ""));
EXPECT_FALSE(IsKeyValid("tmp\\root\\", ""));
EXPECT_FALSE(IsKeyValid(std::string_view("tmp/\0bar", 8), ""));
}
TEST(LongestDirectoryPrefix, Basic) {
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"a", "b"}));
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"/a", "/b"}));
EXPECT_EQ("/a", LongestDirectoryPrefix(KeyRange{"/a/a", "/a/b"}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ecb453fe-d729-4840-a504-6bf8700709b8 | cpp | google/tensorstore | kvstore_server | tensorstore/kvstore/tsgrpc/kvstore_server.cc | tensorstore/kvstore/tsgrpc/kvstore_server_test.cc | #include "tensorstore/kvstore/tsgrpc/kvstore_server.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/server_callback.h"
#include "grpcpp/support/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/grpc/server_credentials.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/tsgrpc/common.h"
#include "tensorstore/kvstore/tsgrpc/common.pb.h"
#include "tensorstore/kvstore/tsgrpc/handler_template.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
#include "tensorstore/util/span.h"
using ::grpc::CallbackServerContext;
using ::tensorstore::internal_metrics::MetricMetadata;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore_grpc::EncodeGenerationAndTimestamp;
using ::tensorstore_grpc::Handler;
using ::tensorstore_grpc::StreamHandler;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
using ::tensorstore_grpc::kvstore::grpc_gen::KvStoreService;
namespace tensorstore {
namespace {
namespace jb = ::tensorstore::internal_json_binding;
auto& read_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/tsgrpc_server/read",
MetricMetadata("KvStoreService::Read calls"));
auto& write_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/tsgrpc_server/write",
MetricMetadata("KvStoreService::Write calls"));
auto& delete_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/tsgrpc_server/delete",
MetricMetadata("KvStoreService::Delete calls"));
auto& list_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/tsgrpc_server/list",
MetricMetadata("KvStoreService::List calls"));
ABSL_CONST_INIT internal_log::VerboseFlag verbose_logging("tsgrpc_kvstore");
// Unary gRPC reactor serving KvStoreService::Read.
//
// Lifetime: the handler is intrusively ref-counted; the pending-future
// callback in Run() holds one reference so the handler outlives the
// asynchronous kvstore read.  Finish() completes the RPC exactly once.
class ReadHandler final : public Handler<ReadRequest, ReadResponse> {
  using Base = Handler<ReadRequest, ReadResponse>;

 public:
  ReadHandler(CallbackServerContext* grpc_context, const Request* request,
              Response* response, KvStore kvstore)
      : Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}

  void Run() {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "ReadHandler " << ConciseDebugString(*request());
    kvstore::ReadOptions options{};
    // Translate the proto generation / byte-range / staleness fields into
    // kvstore read options.
    options.generation_conditions.if_equal.value =
        request()->generation_if_equal();
    options.generation_conditions.if_not_equal.value =
        request()->generation_if_not_equal();
    if (request()->has_byte_range()) {
      options.byte_range.inclusive_min =
          request()->byte_range().inclusive_min();
      options.byte_range.exclusive_max =
          request()->byte_range().exclusive_max();
      if (!options.byte_range.SatisfiesInvariants()) {
        Finish(absl::InvalidArgumentError("Invalid byte range"));
        return;
      }
    }
    if (request()->has_staleness_bound()) {
      TENSORSTORE_ASSIGN_OR_RETURN(
          options.staleness_bound,
          internal::ProtoToAbslTime(request()->staleness_bound()), Finish(_));
    }
    // The linked callback keeps `self` (and thus the reactor) alive until
    // the read resolves or the promise is abandoned.
    internal::IntrusivePtr<ReadHandler> self{this};
    future_ =
        PromiseFuturePair<void>::Link(
            [self = std::move(self)](tensorstore::Promise<void> promise,
                                     auto read_result) {
              if (!promise.result_needed()) return;
              promise.SetResult(self->HandleResult(read_result.result()));
            },
            tensorstore::kvstore::Read(kvstore_, request()->key(), options))
            .future;
  }

  // gRPC cancellation: drop the future (abandoning the pending read) and
  // finish with CANCELLED unless the result already arrived.
  void OnCancel() final {
    if (future_.ready()) return;
    future_ = {};
    Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
  }

  // Copies the kvstore read result (state, generation stamp, value) into
  // the response and completes the RPC.
  absl::Status HandleResult(const Result<kvstore::ReadResult>& result) {
    auto status = result.status();
    if (status.ok()) {
      auto& r = result.value();
      response()->set_state(static_cast<ReadResponse::State>(r.state));
      EncodeGenerationAndTimestamp(r.stamp, response());
      if (r.has_value()) {
        response()->set_value(r.value);
      }
    }
    Finish(status);
    return status;
  }

 private:
  KvStore kvstore_;
  // Pending read; cleared on cancellation.
  Future<void> future_;
};
// Unary gRPC reactor serving KvStoreService::Write.
//
// Same lifetime scheme as ReadHandler: the future's callback holds an
// intrusive reference to the handler until the write resolves.
class WriteHandler final : public Handler<WriteRequest, WriteResponse> {
  using Base = Handler<WriteRequest, WriteResponse>;

 public:
  WriteHandler(CallbackServerContext* grpc_context, const Request* request,
               Response* response, KvStore kvstore)
      : Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}

  void Run() {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "WriteHandler " << ConciseDebugString(*request());
    tensorstore::kvstore::WriteOptions options{};
    // Conditional write: only applied if the current generation matches.
    options.generation_conditions.if_equal.value =
        request()->generation_if_equal();
    internal::IntrusivePtr<WriteHandler> self{this};
    future_ =
        PromiseFuturePair<void>::Link(
            [self = std::move(self)](Promise<void> promise, auto write_result) {
              if (!promise.result_needed()) return;
              promise.SetResult(self->HandleResult(write_result.result()));
            },
            kvstore::Write(kvstore_, request()->key(),
                           absl::Cord(request()->value()), options))
            .future;
  }

  // gRPC cancellation: abandon the pending write and finish CANCELLED
  // unless a result is already available.
  void OnCancel() final {
    if (future_.ready()) return;
    future_ = {};
    Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
  }

  // Encodes the resulting generation/timestamp into the response and
  // completes the RPC.
  absl::Status HandleResult(
      const tensorstore::Result<TimestampedStorageGeneration>& result) {
    auto status = result.status();
    if (status.ok()) {
      EncodeGenerationAndTimestamp(result.value(), response());
    }
    Finish(status);
    return status;
  }

 private:
  KvStore kvstore_;
  // Pending write; cleared on cancellation.
  Future<void> future_;
};
// Unary gRPC reactor serving KvStoreService::Delete.
//
// Supports two request shapes: a key range (DeleteRange) or a single key
// with an optional generation precondition.  A request with neither is
// rejected as invalid.
class DeleteHandler final : public Handler<DeleteRequest, DeleteResponse> {
  using Base = Handler<DeleteRequest, DeleteResponse>;

 public:
  DeleteHandler(CallbackServerContext* grpc_context, const Request* request,
                Response* response, KvStore kvstore)
      : Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}

  void Run() {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "DeleteHandler " << ConciseDebugString(*request());
    // `callback` keeps `self` alive until the delete resolves; the
    // overload of HandleResult chosen depends on the operation's result
    // type (void for range deletes, generation for single-key deletes).
    internal::IntrusivePtr<DeleteHandler> self{this};
    auto callback = [self = std::move(self)](Promise<void> promise,
                                             auto del_result) {
      if (!promise.result_needed()) return;
      promise.SetResult(self->HandleResult(del_result.result()));
    };
    if (request()->has_range()) {
      future_ = PromiseFuturePair<void>::Link(
                    std::move(callback),
                    kvstore::DeleteRange(
                        kvstore_, KeyRange(request()->range().inclusive_min(),
                                           request()->range().exclusive_max())))
                    .future;
    } else if (!request()->key().empty()) {
      kvstore::WriteOptions options{};
      options.generation_conditions.if_equal.value =
          request()->generation_if_equal();
      future_ =
          PromiseFuturePair<void>::Link(
              std::move(callback),
              tensorstore::kvstore::Delete(kvstore_, request()->key(), options))
              .future;
    } else {
      Finish(absl::InvalidArgumentError("Invalid request"));
    }
  }

  // gRPC cancellation: abandon the pending operation and finish CANCELLED
  // unless a result is already available.
  void OnCancel() final {
    if (future_.ready()) return;
    future_ = {};
    Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
  }

  // Range-delete result: no payload, just a status.
  absl::Status HandleResult(const tensorstore::Result<void>& result) {
    auto status = result.status();
    Finish(status);
    return status;
  }

  // Single-key delete result: encode the new generation stamp.
  absl::Status HandleResult(
      const tensorstore::Result<TimestampedStorageGeneration>& result) {
    auto status = result.status();
    if (status.ok()) {
      EncodeGenerationAndTimestamp(result.value(), response());
    }
    Finish(status);
    return status;
  }

 private:
  tensorstore::KvStore kvstore_;
  // Pending delete; cleared on cancellation.
  tensorstore::Future<void> future_;
};
// Server-streaming gRPC reactor serving KvStoreService::List.
//
// Acts as an execution receiver for kvstore::List (via the friend
// set_starting/set_value/set_done/set_error/set_stopping functions) and
// batches entries into ~16KiB ListResponse messages.  At most one gRPC
// write is in flight at a time (`in_flight_msg_`); entries accumulate in
// `current_` until the previous write completes or the list finishes.
class ListHandler final : public StreamHandler<ListRequest, ListResponse> {
  using Base = StreamHandler<ListRequest, ListResponse>;

 public:
  ListHandler(CallbackServerContext* grpc_context, const Request* request,
              tensorstore::KvStore kvstore)
      : Base(grpc_context, request),
        kvstore_(std::move(kvstore)),
        estimated_size_(0),
        cancel_([] {}) {}

  void Run();
  // gRPC cancellation: forward to the list operation's cancel callback.
  void OnCancel() final { cancel_(); }

  // Previous StartWrite completed; try to flush the next batch.
  void OnWriteDone(bool ok) final {
    absl::MutexLock l(&mu_);
    in_flight_msg_ = nullptr;
    MaybeWrite();
  }

  // Flushes `current_` when a batch is large enough, or finishes the RPC
  // once the list operation has stopped.  No-op while a write is pending.
  void MaybeWrite() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    if (in_flight_msg_ != nullptr) return;
    if (!current_) return;
    if (done_) {
      // The list operation has stopped; finish with the error, an empty
      // OK, or a final write-and-finish of the残remaining batch.
      if (!status_.ok()) {
        current_ = nullptr;
        Finish(status_);
      } else if (current_->entry().empty()) {
        current_ = nullptr;
        Finish(grpc::Status::OK);
      } else {
        in_flight_msg_ = std::move(current_);
        StartWriteAndFinish(in_flight_msg_.get(), {}, grpc::Status::OK);
      }
      return;
    }
    // Batch entries until roughly this many key bytes have accumulated.
    constexpr size_t kTargetSize = 16 * 1024;
    if (estimated_size_ < kTargetSize) return;
    in_flight_msg_ = std::move(current_);
    StartWrite(in_flight_msg_.get());
    current_ = std::make_unique<ListResponse>();
    estimated_size_ = 0;
  }

  // --- execution receiver protocol for kvstore::List ---

  [[maybe_unused]] friend void set_starting(
      internal::IntrusivePtr<ListHandler>& self, AnyCancelReceiver cancel) {
    absl::MutexLock l(&self->mu_);
    self->cancel_ = std::move(cancel);
    self->done_ = false;
    self->current_ = std::make_unique<ListResponse>();
    self->estimated_size_ = 0;
  }

  [[maybe_unused]] friend void set_value(
      internal::IntrusivePtr<ListHandler>& self, ListEntry entry) {
    absl::MutexLock l(&self->mu_);
    auto* e = self->current_->add_entry();
    e->set_key(entry.key);
    e->set_size(entry.size);
    self->estimated_size_ += entry.key.size();
    self->MaybeWrite();
  }

  // NOTE(review): unlike set_error/set_stopping, set_done resets `cancel_`
  // without holding `mu_` — presumably safe because the producer has
  // stopped emitting, but worth confirming against OnCancel().
  [[maybe_unused]] friend void set_done(
      internal::IntrusivePtr<ListHandler>& self) {
    self->cancel_ = [] {};
  }

  [[maybe_unused]] friend void set_error(
      internal::IntrusivePtr<ListHandler>& self, absl::Status s) {
    absl::MutexLock l(&self->mu_);
    self->cancel_ = [] {};
    self->status_ = s;
  }

  // Marks the stream finished and flushes whatever remains.
  [[maybe_unused]] friend void set_stopping(
      internal::IntrusivePtr<ListHandler>& self) {
    absl::MutexLock l(&self->mu_);
    self->done_ = true;
    self->MaybeWrite();
  }

 private:
  tensorstore::KvStore kvstore_;

  absl::Mutex mu_;
  // First error reported by the list operation, if any.
  absl::Status status_ ABSL_GUARDED_BY(mu_);
  // Batch currently being filled.
  std::unique_ptr<ListResponse> current_ ABSL_GUARDED_BY(mu_);
  // Batch handed to gRPC; must stay alive until OnWriteDone.
  std::unique_ptr<ListResponse> in_flight_msg_ ABSL_GUARDED_BY(mu_);
  // Approximate byte size of `current_` (sum of key lengths).
  size_t estimated_size_ ABSL_GUARDED_BY(mu_);
  // Cancels the underlying list operation.
  tensorstore::AnyCancelReceiver cancel_;
  // True once the list operation has terminated (done or error).
  std::atomic<bool> done_{true};
};
// Translates the proto request into kvstore ListOptions and submits the
// list operation with this handler as the receiver; results then flow in
// through the friend set_* functions above.
void ListHandler::Run() {
  ABSL_LOG_IF(INFO, verbose_logging)
      << "ListHandler " << ConciseDebugString(*request());
  tensorstore::kvstore::ListOptions options;
  options.range = tensorstore::KeyRange(request()->range().inclusive_min(),
                                        request()->range().exclusive_max());
  options.strip_prefix_length = request()->strip_prefix_length();
  if (request()->has_staleness_bound()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        options.staleness_bound,
        internal::ProtoToAbslTime(request()->staleness_bound()), Finish(_));
  }
  // `self` is retained by the submitted operation until set_stopping.
  internal::IntrusivePtr<ListHandler> self{this};
  tensorstore::execution::submit(
      tensorstore::kvstore::List(self->kvstore_, options), self);
}
}
namespace grpc_kvstore {
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
KvStoreServer::Spec,
jb::Object(jb::Member("base", jb::Projection<&KvStoreServer::Spec::base>()),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->base.path);
return absl::OkStatus();
}),
jb::Member("bind_addresses",
jb::Projection<&KvStoreServer::Spec::bind_addresses>(
jb::DefaultInitializedValue()))));
// gRPC service implementation backing KvStoreServer.
//
// Each RPC constructs an intrusively ref-counted handler (initial count 2:
// the local pointer plus the construction reference) and calls Run().  If
// Run() completed the RPC synchronously, the extra reference is already
// released and the use_count drops to 1; returning nullptr then tells gRPC
// no reactor remains.  Otherwise the raw reactor pointer is returned and
// the handler stays alive through its own reference counting.
class KvStoreServer::Impl final : public KvStoreService::CallbackService {
 public:
  Impl(KvStore kvstore) : kvstore_(std::move(kvstore)) {}

  ::grpc::ServerUnaryReactor* Read(::grpc::CallbackServerContext* context,
                                   const ReadRequest* request,
                                   ReadResponse* response) override {
    read_metric.Increment();
    internal::IntrusivePtr<ReadHandler> handler(
        new ReadHandler(context, request, response, kvstore_));
    assert(handler->use_count() == 2);
    handler->Run();
    assert(handler->use_count() > 0);
    if (handler->use_count() == 1) return nullptr;
    return handler.get();
  }

  ::grpc::ServerUnaryReactor* Write(::grpc::CallbackServerContext* context,
                                    const WriteRequest* request,
                                    WriteResponse* response) override {
    write_metric.Increment();
    internal::IntrusivePtr<WriteHandler> handler(
        new WriteHandler(context, request, response, kvstore_));
    assert(handler->use_count() == 2);
    handler->Run();
    assert(handler->use_count() > 0);
    if (handler->use_count() == 1) return nullptr;
    return handler.get();
  }

  ::grpc::ServerUnaryReactor* Delete(::grpc::CallbackServerContext* context,
                                     const DeleteRequest* request,
                                     DeleteResponse* response) override {
    delete_metric.Increment();
    internal::IntrusivePtr<DeleteHandler> handler(
        new DeleteHandler(context, request, response, kvstore_));
    assert(handler->use_count() == 2);
    handler->Run();
    assert(handler->use_count() > 0);
    if (handler->use_count() == 1) return nullptr;
    return handler.get();
  }

  ::grpc::ServerWriteReactor< ::tensorstore_grpc::kvstore::ListResponse>* List(
      ::grpc::CallbackServerContext* context,
      const ListRequest* request) override {
    list_metric.Increment();
    internal::IntrusivePtr<ListHandler> handler(
        new ListHandler(context, request, kvstore_));
    assert(handler->use_count() == 2);
    handler->Run();
    if (handler->use_count() == 1) return nullptr;
    return handler.get();
  }

  const KvStore& kvstore() const { return kvstore_; }

 private:
  friend class KvStoreServer;
  // The kvstore all RPCs operate on.
  KvStore kvstore_;
  // Ports actually bound by the server (filled in during Start).
  std::vector<int> listening_ports_;
  std::unique_ptr<grpc::Server> server_;
};
// Out-of-line defaulted special members: KvStoreServer is a move-only pimpl
// wrapper, so these must be defined where Impl is a complete type.
KvStoreServer::KvStoreServer() = default;
KvStoreServer::~KvStoreServer() = default;
KvStoreServer::KvStoreServer(KvStoreServer&&) = default;
KvStoreServer& KvStoreServer::operator=(KvStoreServer&&) = default;
// Ports actually bound, in the same order as Spec::bind_addresses.
tensorstore::span<const int> KvStoreServer::ports() const {
  return impl_->listening_ports_;
}
// First bound port (convenience for the common single-address case).
int KvStoreServer::port() const { return impl_->listening_ports_.front(); }
// Blocks until the underlying grpc::Server shuts down.
void KvStoreServer::Wait() { impl_->server_->Wait(); }
// Opens the base kvstore described by `spec`, binds the requested addresses
// (defaulting to an ephemeral port on all interfaces), and starts serving.
//
// Returns the running server on success; bound ports are then available via
// `ports()`.
tensorstore::Result<KvStoreServer> KvStoreServer::Start(Spec spec,
                                                        Context context) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto kv, tensorstore::kvstore::Open(spec.base, context).result());
  auto impl = std::make_unique<KvStoreServer::Impl>(std::move(kv));
  auto creds = context.GetResource<tensorstore::GrpcServerCredentials>()
                   .value()
                   ->GetCredentials();
  grpc::ServerBuilder builder;
  builder.RegisterService(impl.get());
  if (spec.bind_addresses.empty()) {
    spec.bind_addresses.push_back("[::]:0");
  }
  // Each AddListeningPort writes the actually-bound port into
  // listening_ports_[i] once BuildAndStart succeeds.
  impl->listening_ports_.resize(spec.bind_addresses.size());
  for (size_t i = 0; i < spec.bind_addresses.size(); ++i) {
    builder.AddListeningPort(spec.bind_addresses[i], creds,
                             &impl->listening_ports_[i]);
  }
  impl->server_ = builder.BuildAndStart();
  // Robustness fix: BuildAndStart() returns nullptr on failure (e.g. address
  // already in use); previously a later Wait()/port() would dereference null.
  if (impl->server_ == nullptr) {
    return absl::InternalError("Failed to start KvStoreServer");
  }
  KvStoreServer server;
  server.impl_ = std::move(impl);
  return server;
}
}
} | #include "tensorstore/kvstore/tsgrpc/kvstore_server.h"
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::KeyRange;
using ::tensorstore::grpc_kvstore::KvStoreServer;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
class KvStoreSingleton {
public:
KvStoreSingleton() : ctx_(tensorstore::Context::Default()) {
server_ = KvStoreServer::Start(KvStoreServer::Spec::FromJson(
{
{"bind_addresses", {"localhost:0"}},
{"base", "memory:
})
.value(),
ctx_)
.value();
address_ = absl::StrFormat("localhost:%d", server_.port());
}
const std::string& address() const { return address_; }
private:
tensorstore::Context ctx_;
KvStoreServer server_;
std::string address_;
};
// Lazily constructs the shared server fixture on first use.  The instance is
// intentionally leaked so it outlives every test and avoids static
// destruction ordering issues.
const KvStoreSingleton& GetSingleton() {
  static const KvStoreSingleton& instance = *new KvStoreSingleton();
  return instance;
}
// Test fixture exposing the address of the shared in-process server.
class KvStoreTest : public testing::Test {
 public:
  const std::string& address() const { return GetSingleton().address(); }
};
// Runs the generic kvstore read/write conformance suite against the
// tsgrpc_kvstore driver connected to the in-process server.
TEST_F(KvStoreTest, Basic) {
  auto context = tensorstore::Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
                                              {"address", address()},
                                              {"path", "basic/"}},
                                             context)
                      .result());
  tensorstore::internal::TestKeyValueReadWriteOps(store);
}
// DeleteRange with the "a/c" prefix removes only the keys under that prefix,
// leaving siblings "a/b" and "a/d" intact.
TEST_F(KvStoreTest, DeleteRange) {
  auto context = tensorstore::Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
                                              {"address", address()},
                                              {"path", "delete_range/"}},
                                             context)
                      .result());
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::DeleteRange(store, KeyRange::Prefix("a/c")));
  EXPECT_EQ("xyz", kvstore::Read(store, "a/b").value().value);
  EXPECT_EQ("xyz", kvstore::Read(store, "a/d").value().value);
  EXPECT_THAT(kvstore::Read(store, "a/c/x").result(),
              MatchesKvsReadResultNotFound());
  EXPECT_THAT(kvstore::Read(store, "a/c/y").result(),
              MatchesKvsReadResultNotFound());
  EXPECT_THAT(kvstore::Read(store, "a/c/z/e").result(),
              MatchesKvsReadResultNotFound());
  EXPECT_THAT(kvstore::Read(store, "a/c/z/f").result(),
              MatchesKvsReadResultNotFound());
}
// Exercises kvstore::List through the gRPC driver: empty store, full listing,
// prefix-restricted listing, and two cancellation scenarios.
//
// Fix: every `&notification` in this copy had been mangled into
// `¬ification` (HTML-entity corruption of "&not"); restored throughout.
TEST_F(KvStoreTest, List) {
  auto context = tensorstore::Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
                                              {"address", address()},
                                              {"path", "list/"}},
                                             context)
                      .result());
  // Listing an empty store yields no values.
  {
    std::vector<std::string> log;
    absl::Notification notification;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
                                            "set_stopping"));
  }
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
  TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
  // Full listing returns all six keys (order unspecified).
  {
    std::vector<std::string> log;
    absl::Notification notification;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(
        log, ::testing::UnorderedElementsAre(
                 "set_starting", "set_value: a/d", "set_value: a/c/z/f",
                 "set_value: a/c/y", "set_value: a/c/z/e", "set_value: a/c/x",
                 "set_value: a/b", "set_done", "set_stopping"));
  }
  // Prefix-restricted listing returns only keys under "a/c/".
  {
    std::vector<std::string> log;
    absl::Notification notification;
    tensorstore::execution::submit(
        kvstore::List(store, {KeyRange::Prefix("a/c/")}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::UnorderedElementsAre(
                         "set_starting", "set_value: a/c/z/f",
                         "set_value: a/c/y", "set_value: a/c/z/e",
                         "set_value: a/c/x", "set_done", "set_stopping"));
  }
  // Cancelling immediately in set_starting yields no values.
  {
    std::vector<std::string> log;
    absl::Notification notification;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::CancelOnStartingReceiver{{&log}}});
    notification.WaitForNotification();
    EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
                                            "set_stopping"));
  }
  // Cancelling after two signals yields exactly one value.
  {
    std::vector<std::string> log;
    absl::Notification notification;
    tensorstore::execution::submit(
        kvstore::List(store, {}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::CancelAfterNReceiver<2>{{&log}}});
    notification.WaitForNotification();
    EXPECT_THAT(log,
                ::testing::ElementsAre(
                    "set_starting",
                    ::testing::AnyOf("set_value: a/d", "set_value: a/c/z/f",
                                     "set_value: a/c/y", "set_value: a/c/z/e",
                                     "set_value: a/c/x", "set_value: a/b"),
                    "set_done", "set_stopping"));
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/kvstore_server.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/kvstore_server_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6e37f9e9-a68a-4e08-8899-9c880461c192 | cpp | google/tensorstore | tsgrpc | tensorstore/kvstore/tsgrpc/tsgrpc.cc | tensorstore/kvstore/tsgrpc/tsgrpc_test.cc | #include <stdint.h>
#include <atomic>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/grpc/client_credentials.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/tsgrpc/common.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/kvstore/tsgrpc/common.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
using ::tensorstore::GrpcClientCredentials;
using ::tensorstore::internal::AbslTimeToProto;
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore_grpc::DecodeGenerationAndTimestamp;
using ::tensorstore_grpc::GetMessageStatus;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
using ::tensorstore_grpc::kvstore::grpc_gen::KvStoreService;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Per-driver operation counters: the shared kvstore read/write metric sets,
// plus a dedicated counter for kvstore::Write calls that delete a key.
struct TsGrpcMetrics : public internal_kvstore::CommonReadMetrics,
                       public internal_kvstore::CommonWriteMetrics {
  internal_metrics::Counter<int64_t>& delete_calls;
};
// Registered once at static-initialization time under the "tsgrpc" prefix.
auto tsgrpc_metrics = []() -> TsGrpcMetrics {
  return {TENSORSTORE_KVSTORE_COMMON_READ_METRICS(tsgrpc),
          TENSORSTORE_KVSTORE_COMMON_WRITE_METRICS(tsgrpc),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(
              tsgrpc, delete_calls, "kvstore::Write calls deleting a key")};
}();
// Verbose-logging flag named "tsgrpc_kvstore"; gates the ABSL_LOG_IF calls
// throughout this driver.
ABSL_CONST_INIT internal_log::VerboseFlag verbose_logging("tsgrpc_kvstore");
// JSON-serializable state for the tsgrpc_kvstore driver spec:
//  - "address": target endpoint passed to grpc::CreateChannel.
//  - "timeout": per-RPC deadline; defaults to 60 seconds, omitted from JSON
//    when equal to the default.
//  - credentials / data_copy_concurrency: context resources bound by id.
struct TsGrpcKeyValueStoreSpecData {
  std::string address;
  absl::Duration timeout;
  Context::Resource<GrpcClientCredentials> credentials;
  Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;

  // Enables serialization/cache-key support over all members.
  constexpr static auto ApplyMembers = [](auto&& x, auto f) {
    return f(x.address, x.timeout, x.credentials, x.data_copy_concurrency);
  };
  constexpr static auto default_json_binder = jb::Object(
      jb::Member(GrpcClientCredentials::id,
                 jb::Projection<&TsGrpcKeyValueStoreSpecData::credentials>()),
      jb::Member("address",
                 jb::Projection<&TsGrpcKeyValueStoreSpecData::address>()),
      jb::Member("timeout",
                 jb::Projection<&TsGrpcKeyValueStoreSpecData::timeout>(
                     jb::DefaultValue<jb::kNeverIncludeDefaults>(
                         [](auto* x) { *x = absl::Seconds(60); }))),
      jb::Member(
          DataCopyConcurrencyResource::id,
          jb::Projection<
              &TsGrpcKeyValueStoreSpecData::data_copy_concurrency>())
  );
};
// Registered driver spec for the "tsgrpc_kvstore" kvstore scheme.
class TsGrpcKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<
          TsGrpcKeyValueStoreSpec, TsGrpcKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "tsgrpc_kvstore";
  // Creates the driver (see definition below).
  Future<kvstore::DriverPtr> DoOpen() const override;
};
// kvstore driver that forwards Read/Write/DeleteRange/List to a remote
// KvStoreService over gRPC.
class TsGrpcKeyValueStore
    : public internal_kvstore::RegisteredDriver<TsGrpcKeyValueStore,
                                                TsGrpcKeyValueStoreSpec> {
 public:
  // Applies the spec's per-RPC timeout as a deadline on `context`, unless the
  // timeout is zero/negative or infinite.
  void MaybeSetDeadline(grpc::ClientContext& context) {
    if (spec_.timeout > absl::ZeroDuration() &&
        spec_.timeout != absl::InfiniteDuration()) {
      context.set_deadline(absl::ToChronoTime(absl::Now() + spec_.timeout));
    }
  }
  // Executor used to run RPC completion callbacks.
  const Executor& executor() const {
    return spec_.data_copy_concurrency->executor;
  }
  KvStoreService::StubInterface* stub() { return stub_.get(); }
  absl::Status GetBoundSpecData(SpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  Future<const void> DeleteRange(KeyRange range) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;

  SpecData spec_;
  std::shared_ptr<grpc::Channel> channel_;
  std::unique_ptr<KvStoreService::StubInterface> stub_;
};
// One in-flight Read RPC.  Reference-counted: a self-reference held by the
// completion callback keeps the task (and thus `context`/`request`/
// `response`) alive until the RPC finishes; abandoning the returned future
// triggers TryCancel via ExecuteWhenNotNeeded.
struct ReadTask : public internal::AtomicReferenceCount<ReadTask> {
  internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
  grpc::ClientContext context;
  ReadRequest request;
  ReadResponse response;

  // Populates the request from `options` and issues the async RPC.
  Future<kvstore::ReadResult> Start(kvstore::Key key,
                                    const kvstore::ReadOptions& options) {
    request.set_key(std::move(key));
    request.set_generation_if_equal(
        options.generation_conditions.if_equal.value);
    request.set_generation_if_not_equal(
        options.generation_conditions.if_not_equal.value);
    if (!options.byte_range.IsFull()) {
      request.mutable_byte_range()->set_inclusive_min(
          options.byte_range.inclusive_min);
      request.mutable_byte_range()->set_exclusive_max(
          options.byte_range.exclusive_max);
    }
    if (options.staleness_bound != absl::InfiniteFuture()) {
      AbslTimeToProto(options.staleness_bound,
                      request.mutable_staleness_bound());
    }
    driver->MaybeSetDeadline(context);
    internal::IntrusivePtr<ReadTask> self(this);
    auto pair = tensorstore::PromiseFuturePair<kvstore::ReadResult>::Make();
    pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
    driver->stub()->async()->Read(
        &context, &request, &response,
        WithExecutor(driver->executor(), [self = std::move(self),
                                          promise = std::move(pair.promise)](
                                             ::grpc::Status s) {
          if (!promise.result_needed()) return;
          promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
        }));
    return std::move(pair.future);
  }

  // Converts the completed RPC (transport status + response proto) into a
  // kvstore::ReadResult, propagating any embedded error status.
  Result<kvstore::ReadResult> Ready(absl::Status status) {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "ReadTask::Ready " << ConciseDebugString(response) << " " << status;
    TENSORSTORE_RETURN_IF_ERROR(status);
    TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
    TENSORSTORE_ASSIGN_OR_RETURN(auto stamp,
                                 DecodeGenerationAndTimestamp(response));
    return kvstore::ReadResult{
        static_cast<kvstore::ReadResult::State>(response.state()),
        absl::Cord(response.value()),
        std::move(stamp),
    };
  }
};
// One in-flight Write RPC; same lifetime/cancellation protocol as ReadTask.
struct WriteTask : public internal::AtomicReferenceCount<WriteTask> {
  internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
  grpc::ClientContext context;
  WriteRequest request;
  WriteResponse response;

  // Populates the request and issues the async RPC.
  Future<TimestampedStorageGeneration> Start(
      kvstore::Key key, const absl::Cord value,
      const kvstore::WriteOptions& options) {
    request.set_key(std::move(key));
    request.set_value(value);
    request.set_generation_if_equal(
        options.generation_conditions.if_equal.value);
    driver->MaybeSetDeadline(context);
    internal::IntrusivePtr<WriteTask> self(this);
    auto pair =
        tensorstore::PromiseFuturePair<TimestampedStorageGeneration>::Make();
    pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
    driver->stub()->async()->Write(
        &context, &request, &response,
        WithExecutor(driver->executor(), [self = std::move(self),
                                          promise = std::move(pair.promise)](
                                             ::grpc::Status s) {
          if (!promise.result_needed()) return;
          promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
        }));
    return std::move(pair.future);
  }

  // Converts the completed RPC into a storage generation + timestamp.
  Result<TimestampedStorageGeneration> Ready(absl::Status status) {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "WriteTask::Ready " << ConciseDebugString(response) << " " << status;
    TENSORSTORE_RETURN_IF_ERROR(status);
    TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
    return DecodeGenerationAndTimestamp(response);
  }
};
// One in-flight Delete RPC, used both for single-key deletes (via Write with
// no value) and range deletes; same lifetime protocol as ReadTask.
struct DeleteTask : public internal::AtomicReferenceCount<DeleteTask> {
  internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
  grpc::ClientContext context;
  DeleteRequest request;
  DeleteResponse response;

  // Single-key delete, optionally conditioned on a generation.
  Future<TimestampedStorageGeneration> Start(
      kvstore::Key key, const kvstore::WriteOptions options) {
    request.set_key(std::move(key));
    request.set_generation_if_equal(
        options.generation_conditions.if_equal.value);
    return StartImpl();
  }
  // Unconditional delete of a key range.
  Future<TimestampedStorageGeneration> StartRange(KeyRange range) {
    request.mutable_range()->set_inclusive_min(range.inclusive_min);
    request.mutable_range()->set_exclusive_max(range.exclusive_max);
    return StartImpl();
  }
  // Shared RPC issue path for both variants above.
  Future<TimestampedStorageGeneration> StartImpl() {
    driver->MaybeSetDeadline(context);
    internal::IntrusivePtr<DeleteTask> self(this);
    auto pair =
        tensorstore::PromiseFuturePair<TimestampedStorageGeneration>::Make();
    pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
    driver->stub()->async()->Delete(
        &context, &request, &response,
        WithExecutor(driver->executor(), [self = std::move(self),
                                          promise = std::move(pair.promise)](
                                             ::grpc::Status s) {
          if (!promise.result_needed()) return;
          promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
        }));
    return std::move(pair.future);
  }

  // Converts the completed RPC into a storage generation + timestamp.
  Result<TimestampedStorageGeneration> Ready(absl::Status status) {
    ABSL_LOG_IF(INFO, verbose_logging)
        << "DeleteTask::Ready " << ConciseDebugString(response) << " "
        << status;
    TENSORSTORE_RETURN_IF_ERROR(status);
    TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
    return DecodeGenerationAndTimestamp(response);
  }
};
// Bridges the server-streaming List RPC to the tensorstore receiver protocol.
// Unlike the unary tasks, the stream is read synchronously on an executor
// thread (see ListImpl); the task is owned by that executor closure.
struct ListTask {
  internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
  ListReceiver receiver;
  grpc::ClientContext context;
  std::atomic<bool> cancelled = false;
  ListRequest request;

  ListTask(internal::IntrusivePtr<TsGrpcKeyValueStore>&& driver,
           ListReceiver&& receiver)
      : driver(std::move(driver)), receiver(std::move(receiver)) {}

  bool is_cancelled() { return cancelled.load(std::memory_order_relaxed); }

  // Requests cancellation at most once.  Fix: the original used a separate
  // load-then-store, so two concurrent callers (receiver cancel callback vs.
  // the reading thread) could both observe `false`; a single atomic exchange
  // closes that window.
  void try_cancel() {
    if (!cancelled.exchange(true, std::memory_order_relaxed)) {
      context.TryCancel();
    }
  }

  // Reads the stream to completion, forwarding entries to the receiver and
  // honoring cancellation between entries.
  void Run() {
    driver->MaybeSetDeadline(context);
    auto reader = driver->stub()->List(&context, request);
    execution::set_starting(receiver, [this] { try_cancel(); });
    absl::Status msg_status;
    ListResponse response;
    while (reader->Read(&response)) {
      // An error embedded in a response message aborts the stream.
      msg_status = GetMessageStatus(response);
      if (!msg_status.ok()) {
        try_cancel();
        break;
      }
      for (const auto& entry : response.entry()) {
        execution::set_value(receiver, ListEntry{entry.key(), entry.size()});
        if (is_cancelled()) break;
      }
      if (is_cancelled()) break;
    }
    auto s = reader->Finish();
    // Message-level errors take precedence; a cancelled stream still counts
    // as a clean completion.
    if (!msg_status.ok()) {
      execution::set_error(receiver, msg_status);
    } else if (s.ok() || is_cancelled()) {
      execution::set_done(receiver);
    } else {
      execution::set_error(receiver, GrpcStatusToAbslStatus(s));
    }
    execution::set_stopping(receiver);
  }
};
// Issues a gRPC Read for `key`; the actual RPC plumbing lives in ReadTask.
Future<kvstore::ReadResult> TsGrpcKeyValueStore::Read(Key key,
                                                      ReadOptions options) {
  tsgrpc_metrics.read.Increment();
  internal::IntrusivePtr<ReadTask> op(new ReadTask);
  op->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
  return op->Start(std::move(key), options);
}
// Writes `value` to `key`, or deletes `key` when `value` is absent (the
// kvstore convention for deletion).
Future<TimestampedStorageGeneration> TsGrpcKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  if (!value) {
    // No value: this is a conditional single-key delete.
    tsgrpc_metrics.delete_calls.Increment();
    internal::IntrusivePtr<DeleteTask> op(new DeleteTask);
    op->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
    return op->Start(std::move(key), options);
  }
  tsgrpc_metrics.write.Increment();
  internal::IntrusivePtr<WriteTask> op(new WriteTask);
  op->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
  return op->Start(std::move(key), *value, options);
}
// Deletes all keys in `range`; an empty range is a no-op success.  The
// generation stamp returned by the RPC is discarded, keeping only its status.
Future<const void> TsGrpcKeyValueStore::DeleteRange(KeyRange range) {
  if (range.empty()) return absl::OkStatus();
  tsgrpc_metrics.delete_range.Increment();
  internal::IntrusivePtr<DeleteTask> op(new DeleteTask);
  op->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
  auto discard_stamp = [](const Result<TimestampedStorageGeneration>& result) {
    return MakeResult(result.status());
  };
  return MapFuture(InlineExecutor{}, std::move(discard_stamp),
                   op->StartRange(std::move(range)));
}
// Starts a streaming List operation.  For an empty range the receiver
// protocol is completed inline (starting -> done -> stopping, in that exact
// order); otherwise a ListTask is built from `options` and run on the
// driver's executor, which takes ownership of the task.
void TsGrpcKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
  if (options.range.empty()) {
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  tsgrpc_metrics.list.Increment();
  auto task = std::make_unique<ListTask>(
      internal::IntrusivePtr<TsGrpcKeyValueStore>(this), std::move(receiver));
  task->request.mutable_range()->set_inclusive_min(options.range.inclusive_min);
  task->request.mutable_range()->set_exclusive_max(options.range.exclusive_max);
  task->request.set_strip_prefix_length(options.strip_prefix_length);
  if (options.staleness_bound != absl::InfiniteFuture()) {
    AbslTimeToProto(options.staleness_bound,
                    task->request.mutable_staleness_bound());
  }
  executor()([task = std::move(task)] { task->Run(); });
}
// Creates a driver instance: establishes a gRPC channel to the configured
// address using the spec's credentials and builds the service stub.
// Note: channel creation is lazy in gRPC, so this never blocks.
Future<kvstore::DriverPtr> TsGrpcKeyValueStoreSpec::DoOpen() const {
  ABSL_LOG_IF(INFO, verbose_logging)
      << "tsgrpc_kvstore address=" << data_.address;
  auto store = internal::MakeIntrusivePtr<TsGrpcKeyValueStore>();
  store->spec_ = data_;
  store->channel_ =
      grpc::CreateChannel(data_.address, data_.credentials->GetCredentials());
  store->stub_ = KvStoreService::NewStub(store->channel_);
  return store;
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
    tensorstore::TsGrpcKeyValueStore)
// Registers the "tsgrpc_kvstore" driver with the kvstore registry at static
// initialization time.
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::TsGrpcKeyValueStoreSpec>
    registration;
} | #include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "grpcpp/grpcpp.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/internal/grpc/grpc_mock.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/tsgrpc/mock_kvstore_service.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::KeyRange;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::ParseTextProtoOrDie;
using ::tensorstore::StorageGeneration;
using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::tensorstore_grpc::MockKvStoreService;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
// Fixture hosting a mock KvStoreService; each RPC defaults to CANCELLED so
// tests must set explicit expectations for the calls they intend to exercise.
class TsGrpcMockTest : public testing::Test {
 public:
  ~TsGrpcMockTest() override { mock_service_.Shutdown(); }
  TsGrpcMockTest() {
    // Unexpected RPCs fail fast rather than hanging the test.
    ON_CALL(mock(), Read).WillByDefault(Return(grpc::Status::CANCELLED));
    ON_CALL(mock(), Write).WillByDefault(Return(grpc::Status::CANCELLED));
    ON_CALL(mock(), Delete).WillByDefault(Return(grpc::Status::CANCELLED));
    ON_CALL(mock(), List).WillByDefault(Return(grpc::Status::CANCELLED));
  }
  // Opens a tsgrpc_kvstore driver pointed at the mock server.
  tensorstore::KvStore OpenStore() {
    return kvstore::Open({
                             {"driver", "tsgrpc_kvstore"},
                             {"address", mock_service_.server_address()},
                         })
        .value();
  }
  MockKvStoreService& mock() { return *mock_service_.service(); }
  tensorstore::grpc_mocker::MockGrpcServer<MockKvStoreService> mock_service_;
};
// kvstore::Read produces the expected request proto and decodes the mocked
// response (state, value, generation, timestamp).
TEST_F(TsGrpcMockTest, Read) {
  ReadRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
  )pb");
  ReadResponse response = ParseTextProtoOrDie(R"pb(
    state: 2
    value: '1234'
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Read(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  kvstore::ReadResult result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Read(store, expected_request.key()).result());
  }
  EXPECT_TRUE(result.has_value());
  EXPECT_EQ(result.value, "1234");
  EXPECT_EQ(result.stamp.time,
            absl::FromUnixSeconds(1634327736) + absl::Nanoseconds(123456));
  EXPECT_EQ(result.stamp.generation, StorageGeneration::FromString("1"));
}
// Generation conditions and byte ranges are encoded into the request proto.
TEST_F(TsGrpcMockTest, ReadWithOptions) {
  ReadRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: "abc"
    generation_if_not_equal: "abc\001"
    generation_if_equal: "xyz\001"
    byte_range { inclusive_min: 1 exclusive_max: 10 }
  )pb");
  EXPECT_CALL(mock(), Read(_, EqualsProto(expected_request), _))
      .WillOnce(Return(grpc::Status::OK));
  kvstore::ReadResult result;
  {
    kvstore::ReadOptions options;
    options.generation_conditions.if_not_equal =
        StorageGeneration::FromString("abc");
    options.generation_conditions.if_equal =
        StorageGeneration::FromString("xyz");
    options.staleness_bound = absl::InfiniteFuture();
    options.byte_range = OptionalByteRangeRequest{1, 10};
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Read(store, expected_request.key(), options).result());
  }
  EXPECT_EQ(result.stamp.generation, StorageGeneration::Unknown());
}
// kvstore::Write with a value issues a Write RPC with the expected proto.
TEST_F(TsGrpcMockTest, Write) {
  WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
    value: '1234'
  )pb");
  WriteResponse response = ParseTextProtoOrDie(R"pb(
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  tensorstore::TimestampedStorageGeneration result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Write(store, expected_request.key(),
                               absl::Cord(expected_request.value()))
                    .result());
  }
  EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
// An empty value conditioned on NoValue encodes generation_if_equal '\005'.
TEST_F(TsGrpcMockTest, WriteEmpty) {
  WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
    generation_if_equal: '\005'
  )pb");
  WriteResponse response = ParseTextProtoOrDie(R"pb(
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  tensorstore::TimestampedStorageGeneration result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Write(store, expected_request.key(), absl::Cord(),
                               {StorageGeneration::NoValue()})
                    .result());
  }
  EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
// A conditional write forwards the if_equal generation.
TEST_F(TsGrpcMockTest, WriteWithOptions) {
  WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
    value: '1234'
    generation_if_equal: "abc\001"
  )pb");
  WriteResponse response = ParseTextProtoOrDie(R"pb(
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  tensorstore::TimestampedStorageGeneration result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Write(store, expected_request.key(),
                               absl::Cord(expected_request.value()),
                               {StorageGeneration::FromString("abc")})
                    .result());
  }
  EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
// Writing std::nullopt is routed to the Delete RPC, not Write.
TEST_F(TsGrpcMockTest, WriteNullopt) {
  DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
    generation_if_equal: '\005'
  )pb");
  DeleteResponse response = ParseTextProtoOrDie(R"pb(
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  tensorstore::TimestampedStorageGeneration result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Write(store, expected_request.key(), std::nullopt,
                               {StorageGeneration::NoValue()})
                    .result());
  }
  EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, Delete) {
DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
)pb");
DeleteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Delete(store, expected_request.key()).result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
// A conditional delete (if_equal = FromString("abc")) propagates the
// condition as generation_if_equal ("abc\001") on the Delete RPC.
TEST_F(TsGrpcMockTest, DeleteWithOptions) {
  DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
    key: 'abc'
    generation_if_equal: "abc\001"
  )pb");
  DeleteResponse response = ParseTextProtoOrDie(R"pb(
    generation_and_timestamp {
      generation: '1\001'
      timestamp { seconds: 1634327736 nanos: 123456 }
    }
  )pb");
  EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
      .WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
  tensorstore::TimestampedStorageGeneration result;
  {
    auto store = OpenStore();
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        result, kvstore::Delete(store, expected_request.key(),
                                {StorageGeneration::FromString("abc")})
                    .result());
  }
  EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
// DeleteRange over the prefix "a/c" is issued as a single Delete RPC carrying
// the half-open key range [a/c, a/d).
TEST_F(TsGrpcMockTest, DeleteRange) {
  DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
    range { inclusive_min: 'a/c' exclusive_max: 'a/d' }
  )pb");
  EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
      .WillOnce(Return(grpc::Status::OK));
  {
    auto store = OpenStore();
    TENSORSTORE_EXPECT_OK(
        kvstore::DeleteRange(store, KeyRange::Prefix("a/c")).result());
  }
}
// A full-range List streams entries from the server and delivers them to the
// receiver; the LoggingReceiver records the receiver-protocol calls, which
// are compared unordered because delivery order is not pinned here.
TEST_F(TsGrpcMockTest, List) {
  ListRequest expected_request = ParseTextProtoOrDie(R"pb(
    range: {}
  )pb");
  ListResponse response = ParseTextProtoOrDie(R"pb(
    entry { key: 'a' }
    entry { key: 'b' }
    entry { key: 'c' }
  )pb");
  EXPECT_CALL(mock(), List(_, EqualsProto(expected_request), _))
      .WillOnce(testing::Invoke(
          [=](auto*, auto*,
              grpc::ServerWriter<ListResponse>* resp) -> ::grpc::Status {
            // Write a single response containing all three entries.
            resp->Write(response);
            return grpc::Status::OK;
          }));
  std::vector<std::string> log;
  {
    auto store = OpenStore();
    absl::Notification notification;
    tensorstore::execution::submit(
        tensorstore::kvstore::List(store, {}),
        tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::LoggingReceiver{&log}});
    notification.WaitForNotification();
  }
  EXPECT_THAT(log, ::testing::UnorderedElementsAre(
                       "set_starting", "set_value: a", "set_value: b",
                       "set_value: c", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/tsgrpc.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/tsgrpc_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8e4c68dd-bf7a-461f-8359-b00cdc6e5b6d | cpp | google/tensorstore | gcs_grpc | tensorstore/kvstore/gcs_grpc/gcs_grpc.cc | tensorstore/kvstore/gcs_grpc/gcs_grpc_test.cc | #include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/crc/crc32c.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/client_context.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/client_callback.h"
#include "grpcpp/support/status.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/gcs/gcs_resource.h"
#include "tensorstore/kvstore/gcs/validate.h"
#include "tensorstore/kvstore/gcs_grpc/get_credentials.h"
#include "tensorstore/kvstore/gcs_grpc/storage_stub_pool.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/fwd.h"
#include "google/protobuf/empty.pb.h"
#include "google/storage/v2/storage.grpc.pb.h"
#include "google/storage/v2/storage.pb.h"
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_gcs_grpc::GetCredentialsForEndpoint;
using ::tensorstore::internal_gcs_grpc::GetSharedStorageStubPool;
using ::tensorstore::internal_gcs_grpc::StorageStubPool;
using ::tensorstore::internal_storage_gcs::GcsUserProjectResource;
using ::tensorstore::internal_storage_gcs::IsRetriable;
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
using ::tensorstore::internal_storage_gcs::IsValidStorageGeneration;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
using ::google::storage::v2::DeleteObjectRequest;
using ::google::storage::v2::ListObjectsRequest;
using ::google::storage::v2::ListObjectsResponse;
using ::google::storage::v2::ReadObjectRequest;
using ::google::storage::v2::ReadObjectResponse;
using ::google::storage::v2::ServiceConstants;
using ::google::storage::v2::WriteObjectRequest;
using ::google::storage::v2::WriteObjectResponse;
using ::google::storage::v2::Storage;
namespace {
// URL scheme handled by this driver (see ParseGcsGrpcUrl below).
static constexpr char kUriScheme[] = "gcs_grpc";
// Upper bound on the payload carried by one WriteObject stream message, taken
// from the storage service's published constant.
static constexpr size_t kMaxWriteBytes =
    ServiceConstants::MAX_WRITE_CHUNK_BYTES;
}
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Metric set for the gcs_grpc driver: the common kvstore metrics plus a
// counter of retried requests (incremented by BackoffForAttemptAsync).
struct GcsMetrics : public internal_kvstore::CommonMetrics {
  internal_metrics::Counter<int64_t>& retries;
};
// Process-wide metric instances for this driver, constructed once at static
// initialization via an immediately-invoked lambda.
auto gcs_grpc_metrics = []() -> GcsMetrics {
  return {TENSORSTORE_KVSTORE_COMMON_METRICS(gcs_grpc),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(
              gcs_grpc, retries,
              // Fixed typo in the metric description ("Ccunt" -> "Count").
              "Count of all retried requests (read/write/delete)")};
}();
// Verbose-logging flag gating the ABSL_LOG_IF(INFO, gcs_grpc_logging)
// statements in the task classes below.
ABSL_CONST_INIT internal_log::VerboseFlag gcs_grpc_logging("gcs_grpc");
// JSON-bindable configuration for the gcs_grpc driver.
struct GcsGrpcKeyValueStoreSpecData {
  // GCS bucket name; validated by the binder below.
  std::string bucket;
  // Optional non-default service endpoint (e.g. a test server); empty means
  // the production endpoint is used (see DoOpen).
  std::string endpoint;
  // Number of gRPC channels in the shared stub pool; 0 selects the pool's
  // default.
  uint32_t num_channels = 0;
  // Per-RPC deadline; zero (the default) means no deadline is set.
  absl::Duration timeout = absl::ZeroDuration();
  // How long DoOpen blocks waiting for channel connectivity; zero disables.
  absl::Duration wait_for_connection = absl::ZeroDuration();
  Context::Resource<GcsUserProjectResource> user_project;
  Context::Resource<internal_storage_gcs::GcsRequestRetries> retries;
  Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;

  constexpr static auto ApplyMembers = [](auto&& x, auto f) {
    return f(x.bucket, x.endpoint, x.num_channels, x.timeout,
             x.wait_for_connection, x.user_project, x.retries,
             x.data_copy_concurrency);
  };

  // JSON binder: "bucket" is required and validated; the scalar options
  // default to zero values and are omitted from JSON when at their defaults.
  constexpr static auto default_json_binder = jb::Object(
      jb::Member(
          "bucket",
          jb::Projection<&GcsGrpcKeyValueStoreSpecData::bucket>(
              jb::Validate([](const auto& options, const std::string* x) {
                if (!IsValidBucketName(*x)) {
                  return absl::InvalidArgumentError(tensorstore::StrCat(
                      "Invalid GCS bucket name: ", QuoteString(*x)));
                }
                return absl::OkStatus();
              }))),
      jb::Member("endpoint",
                 jb::Projection<&GcsGrpcKeyValueStoreSpecData::endpoint>(
                     jb::DefaultInitializedValue())),
      jb::Member("num_channels",
                 jb::Projection<&GcsGrpcKeyValueStoreSpecData::num_channels>(
                     jb::DefaultInitializedValue())),
      jb::Member("timeout",
                 jb::Projection<&GcsGrpcKeyValueStoreSpecData::timeout>(
                     jb::DefaultValue<jb::kNeverIncludeDefaults>(
                         [](auto* x) { *x = absl::ZeroDuration(); }))),
      jb::Member(
          "wait_for_connection",
          jb::Projection<&GcsGrpcKeyValueStoreSpecData::wait_for_connection>(
              jb::DefaultValue<jb::kNeverIncludeDefaults>(
                  [](auto* x) { *x = absl::ZeroDuration(); }))),
      jb::Member(GcsUserProjectResource::id,
                 jb::Projection<&GcsGrpcKeyValueStoreSpecData::user_project>()),
      jb::Member(internal_storage_gcs::GcsRequestRetries::id,
                 jb::Projection<&GcsGrpcKeyValueStoreSpecData::retries>()),
      jb::Member(
          DataCopyConcurrencyResource::id,
          jb::Projection<
              &GcsGrpcKeyValueStoreSpecData::data_copy_concurrency>()),
      jb::DiscardExtraMembers);
};
// Registered driver spec for "gcs_grpc".
class GcsGrpcKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<
          GcsGrpcKeyValueStoreSpec, GcsGrpcKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "gcs_grpc";
  Future<kvstore::DriverPtr> DoOpen() const override;

  // Validates that a non-empty path is a legal GCS object name.
  absl::Status NormalizeSpec(std::string& path) override {
    if (!path.empty() && !IsValidObjectName(path)) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Invalid GCS path: ", QuoteString(path)));
    }
    return absl::OkStatus();
  }

  // Formats the spec as "gcs_grpc://<bucket>/<percent-encoded path>".  Specs
  // with a custom endpoint have no URL form.
  Result<std::string> ToUrl(std::string_view path) const override {
    if (!data_.endpoint.empty()) {
      return absl::UnimplementedError(
          "URL representation does not support test endpoints");
    }
    // NOTE(review): the StrCat argument list was truncated in this copy of
    // the file; reconstructed to match the scheme://bucket/path form that
    // ParseGcsGrpcUrl accepts.
    return tensorstore::StrCat(kUriScheme, "://", data_.bucket, "/",
                               internal::PercentEncodeUriPath(path));
  }
};
// kvstore driver speaking the GCS gRPC API (google.storage.v2).  Individual
// operations are delegated to per-request task objects (ReadTask, WriteTask,
// DeleteTask, ListTask) defined below.
class GcsGrpcKeyValueStore
    : public internal_kvstore::RegisteredDriver<GcsGrpcKeyValueStore,
                                                GcsGrpcKeyValueStoreSpec> {
 public:
  // Reads are batched/coalesced using the default remote-storage policy.
  internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
      const {
    return internal_kvstore_batch::kDefaultRemoteStorageCoalescingOptions;
  }

  Future<ReadResult> Read(Key key, ReadOptions options) override;
  // Issues a single (already-coalesced) read; called by the batch machinery.
  Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);

  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;

  void ListImpl(ListOptions options, ListReceiver receiver) override;

  Future<const void> DeleteRange(KeyRange range) override;

  absl::Status GetBoundSpecData(SpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }

  SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
           SupportedFeatures::kAtomicWriteWithoutOverwrite;
  }

  // Executor used for retries and RPC completion callbacks.
  const Executor& executor() const {
    return spec_.data_copy_concurrency->executor;
  }

  // Fully-qualified bucket resource name ("projects/_/buckets/<bucket>").
  std::string bucket_name() { return bucket_; }

  // Round-robins over the shared channel pool.
  std::shared_ptr<Storage::StubInterface> get_stub() {
    return storage_stub_pool_->get_next_stub();
  }

  // Creates a ClientContext carrying the user-project / request-params
  // metadata, the optional per-RPC deadline, and call credentials.
  std::unique_ptr<grpc::ClientContext> AllocateContext() {
    auto context = std::make_unique<grpc::ClientContext>();
    if (spec_.user_project->project_id &&
        !spec_.user_project->project_id->empty()) {
      context->AddMetadata("x-goog-user-project",
                           *spec_.user_project->project_id);
    }
    context->AddMetadata("x-goog-request-params",
                         absl::StrFormat("bucket=%s", bucket_name()));
    if (spec_.timeout > absl::ZeroDuration() &&
        spec_.timeout < absl::InfiniteDuration()) {
      context->set_deadline(absl::ToChronoTime(absl::Now() + spec_.timeout));
    }
    if (call_credentials_fn_) {
      context->set_credentials(call_credentials_fn_());
    }
    return context;
  }

  // Schedules task->Retry() after the backoff for `attempt`, or returns a
  // non-OK status (annotated, kAborted) once retries are exhausted.  On
  // success the retry holds its own reference to `task`.
  template <typename Task>
  absl::Status BackoffForAttemptAsync(
      absl::Status status, int attempt, Task* task,
      SourceLocation loc = ::tensorstore::SourceLocation::current()) {
    assert(task != nullptr);
    auto delay = spec_.retries->BackoffForAttempt(attempt);
    if (!delay) {
      return MaybeAnnotateStatus(std::move(status),
                                 absl::StrFormat("All %d retry attempts failed",
                                                 spec_.retries->max_retries),
                                 absl::StatusCode::kAborted, loc);
    }
    gcs_grpc_metrics.retries.Increment();
    ScheduleAt(absl::Now() + *delay,
               WithExecutor(executor(), [task = internal::IntrusivePtr<Task>(
                                             task)] { task->Retry(); }));
    return absl::OkStatus();
  }

  SpecData spec_;
  std::string bucket_;
  std::shared_ptr<StorageStubPool> storage_stub_pool_;
  std::function<std::shared_ptr<grpc::CallCredentials>()> call_credentials_fn_;
};
// Computes the CRC32C checksum of `cord`, folding in each contiguous
// fragment without flattening the cord.
absl::crc32c_t ComputeCrc32c(const absl::Cord& cord) {
  absl::crc32c_t checksum{0};
  for (auto fragment : cord.Chunks()) {
    checksum = absl::ExtendCrc32c(checksum, fragment);
  }
  return checksum;
}
// Implements a single-key read as a server-streaming ReadObject RPC, with an
// internal retry loop.  Lifetime: intrusive_ptr_increment is called before
// each async call and the matching reference is adopted in OnDone; the
// promise's not-needed callback also holds a reference for cancellation.
struct ReadTask : public internal::AtomicReferenceCount<ReadTask>,
                  public grpc::ClientReadReactor<ReadObjectResponse> {
  internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
  kvstore::ReadOptions options_;
  Promise<kvstore::ReadResult> promise_;
  Storage::StubInterface* stub_ = nullptr;

  ReadObjectRequest request_;
  ReadObjectResponse response_;
  // Whole-object checksum from the response, verified in ReadFinished; only
  // set when the entire object is being read.
  std::optional<absl::crc32c_t> crc32c_;
  TimestampedStorageGeneration storage_generation_;
  // Object content accumulated across streamed chunks.
  absl::Cord value_;

  int attempt_ = 0;
  absl::Mutex mutex_;
  // Context of the in-flight RPC; null between attempts.
  std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);

  void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock lock(&mutex_);
    if (context_) context_->TryCancel();
  }

  // Translates `options_` into the ReadObject request and issues the first
  // attempt.  NoValue generation conditions map to generation 0.
  void Start(const std::string& object_name) {
    ABSL_LOG_IF(INFO, gcs_grpc_logging) << "ReadTask " << object_name;
    stub_ = driver_->get_stub().get();
    promise_.ExecuteWhenNotNeeded(
        [self = internal::IntrusivePtr<ReadTask>(this)] { self->TryCancel(); });
    request_.set_bucket(driver_->bucket_name());
    request_.set_object(object_name);
    if (!StorageGeneration::IsUnknown(
            options_.generation_conditions.if_equal)) {
      uint64_t gen =
          StorageGeneration::IsNoValue(options_.generation_conditions.if_equal)
              ? 0
              : StorageGeneration::ToUint64(
                    options_.generation_conditions.if_equal);
      request_.set_if_generation_match(gen);
    }
    if (!StorageGeneration::IsUnknown(
            options_.generation_conditions.if_not_equal)) {
      uint64_t gen = StorageGeneration::IsNoValue(
                         options_.generation_conditions.if_not_equal)
                         ? 0
                         : StorageGeneration::ToUint64(
                               options_.generation_conditions.if_not_equal);
      request_.set_if_generation_not_match(gen);
    }
    if (options_.byte_range.inclusive_min != 0) {
      request_.set_read_offset(options_.byte_range.inclusive_min);
    }
    if (options_.byte_range.exclusive_max != -1) {
      auto target_size = options_.byte_range.size();
      assert(target_size >= 0);
      // A zero-size range still reads 1 byte so the server returns object
      // metadata; the byte is discarded in ReadFinished.
      request_.set_read_limit(target_size == 0 ? 1 : target_size);
    }
    Retry();
  }

  // Starts (or restarts) the streaming call with a fresh context.
  void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
    if (!promise_.result_needed()) {
      return;
    }
    value_.Clear();
    storage_generation_ =
        TimestampedStorageGeneration{StorageGeneration::Unknown(), absl::Now()};
    {
      absl::MutexLock lock(&mutex_);
      assert(context_ == nullptr);
      context_ = driver_->AllocateContext();
      // Reference transferred to the gRPC reactor; adopted in OnDone.
      intrusive_ptr_increment(this);
      stub_->async()->ReadObject(context_.get(), &request_, this);
    }
    StartRead(&response_);
    StartCall();
  }

  // Per-message callback: validates the returned range and per-chunk crc32c,
  // records the generation, and appends the chunk.  On !ok the final status
  // is delivered via OnDone instead.
  void OnReadDone(bool ok) override {
    if (!ok) return;
    if (!promise_.result_needed()) {
      TryCancel();
      return;
    }
    if (response_.has_metadata()) {
      storage_generation_.generation =
          StorageGeneration::FromUint64(response_.metadata().generation());
    }
    // Only keep the whole-object checksum when reading the full object.
    if (response_.has_object_checksums() &&
        response_.object_checksums().crc32c() != 0 &&
        options_.byte_range.inclusive_min == 0 &&
        !options_.byte_range.exclusive_max) {
      crc32c_ = absl::crc32c_t(response_.object_checksums().crc32c());
    }
    if (response_.has_content_range()) {
      auto returned_size =
          response_.content_range().end() - response_.content_range().start();
      if (auto size = options_.byte_range.size();
          (size > 0 && size != returned_size) ||
          (options_.byte_range.inclusive_min >= 0 &&
           response_.content_range().start() !=
               options_.byte_range.inclusive_min)) {
        promise_.SetResult(absl::OutOfRangeError(
            tensorstore::StrCat("Requested byte range ", options_.byte_range,
                                " was not satisfied by GCS object with size ",
                                response_.content_range().complete_length())));
        TryCancel();
        return;
      }
    }
    if (response_.has_checksummed_data() &&
        response_.checksummed_data().has_crc32c() &&
        response_.checksummed_data().crc32c() != 0) {
      auto content_crc32c =
          ComputeCrc32c(response_.checksummed_data().content());
      if (content_crc32c !=
          absl::crc32c_t(response_.checksummed_data().crc32c())) {
        promise_.SetResult(absl::DataLossError(absl::StrFormat(
            "Object fragment crc32c %08x does not match expected crc32c %08x",
            static_cast<uint32_t>(content_crc32c),
            response_.checksummed_data().crc32c())));
        TryCancel();
        return;
      }
    }
    if (response_.has_checksummed_data()) {
      gcs_grpc_metrics.bytes_read.IncrementBy(
          response_.checksummed_data().content().size());
      value_.Append(response_.checksummed_data().content());
    }
    // Request the next streamed message.
    StartRead(&response_);
  }

  // Final stream callback; hops to the driver executor and adopts the
  // reference taken in Retry().
  void OnDone(const grpc::Status& s) override {
    internal::IntrusivePtr<ReadTask> self(this, internal::adopt_object_ref);
    driver_->executor()(
        [self = std::move(self), status = GrpcStatusToAbslStatus(s)]() {
          self->ReadFinished(std::move(status));
        });
  }

  // Maps the final RPC status to a ReadResult, retrying where appropriate.
  void ReadFinished(absl::Status status) {
    if (!promise_.result_needed()) {
      return;
    }
    {
      absl::MutexLock lock(&mutex_);
      context_ = nullptr;
    }
    // A single immediate retry on the first kUnauthenticated (e.g. stale
    // credentials), without backoff.
    if (!status.ok() && attempt_ == 0 &&
        status.code() == absl::StatusCode::kUnauthenticated) {
      attempt_++;
      Retry();
      return;
    }
    if (!status.ok() && IsRetriable(status)) {
      status =
          driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    auto latency = absl::Now() - storage_generation_.time;
    gcs_grpc_metrics.read_latency_ms.Observe(
        absl::ToInt64Milliseconds(latency));
    if (!status.ok()) {
      // Failed generation conditions surface as FailedPrecondition/Aborted
      // and become an Unspecified result carrying the matching generation.
      if (absl::IsFailedPrecondition(status) || absl::IsAborted(status)) {
        if (!StorageGeneration::IsUnknown(
                options_.generation_conditions.if_equal)) {
          storage_generation_.generation = StorageGeneration::Unknown();
        } else {
          storage_generation_.generation =
              options_.generation_conditions.if_not_equal;
        }
        promise_.SetResult(
            kvstore::ReadResult::Unspecified(std::move(storage_generation_)));
        return;
      }
      if (absl::IsNotFound(status)) {
        promise_.SetResult(
            kvstore::ReadResult::Missing(storage_generation_.time));
        return;
      }
      promise_.SetResult(std::move(status));
      return;
    }
    if (StorageGeneration::IsUnknown(storage_generation_.generation)) {
      promise_.SetResult(
          absl::InternalError("Object missing a valid generation"));
      return;
    }
    if (options_.byte_range.size() == 0) {
      // Discard the 1 byte read for a zero-size range (see Start()).
      value_.Clear();
    } else if (crc32c_.has_value() && ComputeCrc32c(value_) != *crc32c_) {
      promise_.SetResult(
          absl::DataLossError("Object crc32c does not match expected crc32c"));
      return;
    }
    promise_.SetResult(kvstore::ReadResult::Value(
        std::move(value_), std::move(storage_generation_)));
  }
};
// Implements a write as a client-streaming WriteObject RPC, chunking `value_`
// into messages of at most kMaxWriteBytes with per-chunk and whole-object
// crc32c checksums.  Lifetime follows the same pattern as ReadTask: a
// reference is taken before each async call and adopted in OnDone.
struct WriteTask : public internal::AtomicReferenceCount<WriteTask>,
                   public grpc::ClientWriteReactor<WriteObjectRequest> {
  internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
  kvstore::WriteOptions options_;
  Promise<TimestampedStorageGeneration> promise_;
  std::string object_name_;
  absl::Cord value_;
  Storage::StubInterface* stub_ = nullptr;

  WriteObjectRequest request_;
  WriteObjectResponse response_;
  TimestampedStorageGeneration write_result_;
  // Byte offset of the next chunk to send; 0 indicates the first message.
  size_t write_offset_;
  // Running crc32c over everything sent so far.
  absl::crc32c_t crc32c_;

  int attempt_ = 0;
  absl::Mutex mutex_;
  // Context of the in-flight RPC; null between attempts.
  std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);

  void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock lock(&mutex_);
    if (context_) context_->TryCancel();
  }

  // Fills `request_` with the next chunk.  The first message also carries the
  // write_object_spec (resource, object size, generation condition); the
  // final message carries the whole-object checksum and finish_write.
  void UpdateRequestForNextWrite() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock lock(&mutex_);
    if (write_offset_ == 0) {
      write_result_.time = absl::Now();
      auto& resource =
          *request_.mutable_write_object_spec()->mutable_resource();
      resource.set_bucket(driver_->bucket_name());
      resource.set_name(object_name_);
      request_.mutable_write_object_spec()->set_object_size(value_.size());
      if (!StorageGeneration::IsUnknown(
              options_.generation_conditions.if_equal)) {
        auto gen = StorageGeneration::ToUint64(
            options_.generation_conditions.if_equal);
        request_.mutable_write_object_spec()->set_if_generation_match(gen);
      }
    } else {
      request_.clear_write_object_spec();
    }
    request_.set_write_offset(write_offset_);
    size_t next_write_offset =
        std::min(write_offset_ + kMaxWriteBytes, value_.size());
    auto& checksummed_data = *request_.mutable_checksummed_data();
    checksummed_data.set_content(
        value_.Subcord(write_offset_, next_write_offset - write_offset_));
    auto chunk_crc32c = ComputeCrc32c(checksummed_data.content());
    checksummed_data.set_crc32c(static_cast<uint32_t>(chunk_crc32c));
    // Extend the whole-object checksum with this chunk.
    crc32c_ = absl::ConcatCrc32c(crc32c_, chunk_crc32c,
                                 checksummed_data.content().size());
    write_offset_ = next_write_offset;
    if (write_offset_ == value_.size()) {
      request_.mutable_object_checksums()->set_crc32c(
          static_cast<uint32_t>(crc32c_));
      request_.set_finish_write(true);
    }
  }

  void Start(std::string object_name, absl::Cord value) {
    ABSL_LOG_IF(INFO, gcs_grpc_logging) << "WriteTask " << object_name;
    object_name_ = std::move(object_name);
    value_ = std::move(value);
    stub_ = driver_->get_stub().get();
    promise_.ExecuteWhenNotNeeded([self = internal::IntrusivePtr<WriteTask>(
                                       this)] { self->TryCancel(); });
    Retry();
  }

  // Starts (or restarts) the streaming call from offset 0 with a fresh
  // context, and sends the first chunk.
  void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
    if (!promise_.result_needed()) {
      return;
    }
    write_offset_ = 0;
    crc32c_ = absl::crc32c_t{0};
    request_.Clear();
    {
      absl::MutexLock lock(&mutex_);
      assert(context_ == nullptr);
      context_ = driver_->AllocateContext();
      // Reference transferred to the gRPC reactor; adopted in OnDone.
      intrusive_ptr_increment(this);
      stub_->async()->WriteObject(context_.get(), &response_, this);
    }
    UpdateRequestForNextWrite();
    auto options = grpc::WriteOptions();
    if (request_.finish_write()) {
      options.set_last_message();
    }
    StartWrite(&request_, options);
    StartCall();
  }

  // Per-message callback: queue the next chunk until finish_write was sent.
  void OnWriteDone(bool ok) override {
    if (!ok) return;
    if (request_.finish_write()) return;
    UpdateRequestForNextWrite();
    auto options = grpc::WriteOptions();
    if (request_.finish_write()) {
      options.set_last_message();
    }
    StartWrite(&request_, options);
  }

  // Final stream callback; hops to the driver executor and adopts the
  // reference taken in Retry().
  void OnDone(const grpc::Status& s) override {
    internal::IntrusivePtr<WriteTask> self(this, internal::adopt_object_ref);
    driver_->executor()(
        [self = std::move(self), status = GrpcStatusToAbslStatus(s)] {
          self->WriteFinished(std::move(status));
        });
  }

  // Maps the final RPC status to a TimestampedStorageGeneration, retrying
  // where appropriate.
  void WriteFinished(absl::Status status) {
    if (!promise_.result_needed()) {
      return;
    }
    auto latency = absl::Now() - write_result_.time;
    gcs_grpc_metrics.write_latency_ms.Observe(
        absl::ToInt64Milliseconds(latency));
    {
      absl::MutexLock lock(&mutex_);
      context_ = nullptr;
    }
    // A single immediate retry on the first kUnauthenticated, no backoff.
    if (!status.ok() && attempt_ == 0 &&
        status.code() == absl::StatusCode::kUnauthenticated) {
      attempt_++;
      Retry();
      return;
    }
    if (!status.ok() && IsRetriable(status)) {
      status =
          driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    if (response_.has_resource()) {
      write_result_.generation =
          StorageGeneration::FromUint64(response_.resource().generation());
    }
    // Failed generation conditions are reported with generation=Unknown
    // rather than as an error.
    if (absl::IsFailedPrecondition(status) || absl::IsAlreadyExists(status)) {
      write_result_.generation = StorageGeneration::Unknown();
      promise_.SetResult(std::move(write_result_));
    } else if (absl::IsNotFound(status) &&
               !StorageGeneration::IsUnknown(
                   options_.generation_conditions.if_equal)) {
      write_result_.generation = StorageGeneration::Unknown();
      promise_.SetResult(std::move(write_result_));
    } else if (!status.ok()) {
      promise_.SetResult(status);
    } else {
      promise_.SetResult(std::move(write_result_));
    }
  }
};
// Implements a (possibly conditional) single-key delete as a unary
// DeleteObject RPC, with the same retry/lifetime pattern as the other tasks.
struct DeleteTask : public internal::AtomicReferenceCount<DeleteTask> {
  internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
  kvstore::WriteOptions options_;
  Promise<TimestampedStorageGeneration> promise_;
  Storage::StubInterface* stub_ = nullptr;

  // Timestamp of the current attempt; reported in the result.
  absl::Time start_time_;
  DeleteObjectRequest request_;
  ::google::protobuf::Empty response_;

  int attempt_ = 0;
  absl::Mutex mutex_;
  // Context of the in-flight RPC; null between attempts.
  std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);

  void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock lock(&mutex_);
    if (context_) context_->TryCancel();
  }

  // Builds the request (including any if_equal condition) and issues the
  // first attempt.
  void Start(const std::string& object_name) {
    ABSL_LOG_IF(INFO, gcs_grpc_logging) << "DeleteTask " << object_name;
    stub_ = driver_->get_stub().get();
    promise_.ExecuteWhenNotNeeded([self = internal::IntrusivePtr<DeleteTask>(
                                       this)] { self->TryCancel(); });
    request_.set_bucket(driver_->bucket_name());
    request_.set_object(object_name);
    if (!StorageGeneration::IsUnknown(
            options_.generation_conditions.if_equal)) {
      auto gen =
          StorageGeneration::ToUint64(options_.generation_conditions.if_equal);
      request_.set_if_generation_match(gen);
    }
    Retry();
  }

  void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
    if (!promise_.result_needed()) {
      return;
    }
    start_time_ = absl::Now();
    {
      absl::MutexLock lock(&mutex_);
      assert(context_ == nullptr);
      context_ = driver_->AllocateContext();
      // Reference transferred to the completion callback, which adopts it.
      intrusive_ptr_increment(this);
      stub_->async()->DeleteObject(
          context_.get(), &request_, &response_,
          WithExecutor(driver_->executor(), [this](::grpc::Status s) {
            internal::IntrusivePtr<DeleteTask> self(this,
                                                    internal::adopt_object_ref);
            self->DeleteFinished(GrpcStatusToAbslStatus(s));
          }));
    }
  }

  // Maps the RPC status to a TimestampedStorageGeneration: success (and
  // NotFound matching a no-value condition) yields NoValue; failed
  // preconditions yield generation=Unknown.
  void DeleteFinished(absl::Status status) {
    if (!promise_.result_needed()) {
      return;
    }
    {
      absl::MutexLock lock(&mutex_);
      context_ = nullptr;
    }
    // A single immediate retry on the first kUnauthenticated, no backoff.
    if (!status.ok() && attempt_ == 0 &&
        status.code() == absl::StatusCode::kUnauthenticated) {
      attempt_++;
      Retry();
      return;
    }
    if (!status.ok() && IsRetriable(status)) {
      status =
          driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    TimestampedStorageGeneration r;
    r.time = start_time_;
    r.generation = StorageGeneration::NoValue();
    if (absl::IsFailedPrecondition(status)) {
      r.generation = StorageGeneration::Unknown();
    } else if (absl::IsNotFound(status)) {
      if (!options_.generation_conditions.MatchesNoValue()) {
        r.generation = StorageGeneration::Unknown();
      }
    } else if (!status.ok()) {
      promise_.SetResult(std::move(status));
      return;
    }
    promise_.SetResult(std::move(r));
  }
};
// Implements ListImpl as a sequence of unary ListObjects RPCs, paging via
// next_page_token and forwarding matching entries to `receiver_`.
struct ListTask : public internal::AtomicReferenceCount<ListTask> {
  internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
  kvstore::ListOptions options_;
  ListReceiver receiver_;
  Storage::StubInterface* stub_ = nullptr;

  ListObjectsRequest request;
  ListObjectsResponse response;
  int attempt_ = 0;

  absl::Mutex mutex_;
  // Context of the in-flight RPC; null between attempts.
  std::unique_ptr<grpc::ClientContext> context_ ABSL_GUARDED_BY(mutex_);
  // Set by the receiver's cancel callback; checked between pages/entries.
  bool cancelled_ ABSL_GUARDED_BY(mutex_) = false;

  ListTask(internal::IntrusivePtr<GcsGrpcKeyValueStore>&& driver,
           kvstore::ListOptions&& options, ListReceiver&& receiver)
      : driver_(std::move(driver)),
        options_(std::move(options)),
        receiver_(std::move(receiver)) {
    // Wire up cancellation before any RPC is issued.
    execution::set_starting(receiver_, [this] { TryCancel(); });
  }

  ~ListTask() {
    {
      absl::MutexLock l(&mutex_);
      context_ = nullptr;
    }
    driver_ = {};
    execution::set_stopping(receiver_);
  }

  bool is_cancelled() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock l(&mutex_);
    return cancelled_;
  }

  void TryCancel() ABSL_LOCKS_EXCLUDED(mutex_) {
    absl::MutexLock l(&mutex_);
    if (!cancelled_) {
      cancelled_ = true;
      if (context_) context_->TryCancel();
    }
  }

  // Builds the ListObjects request from the key range and issues the first
  // page request.
  void Start() {
    ABSL_LOG_IF(INFO, gcs_grpc_logging) << "ListTask " << options_.range;
    stub_ = driver_->get_stub().get();
    request.set_lexicographic_start(options_.range.inclusive_min);
    request.set_lexicographic_end(options_.range.exclusive_max);
    request.set_parent(driver_->bucket_name());
    request.set_page_size(1000);
    Retry();
  }

  void Retry() ABSL_LOCKS_EXCLUDED(mutex_) {
    if (is_cancelled()) {
      execution::set_done(receiver_);
      return;
    }
    {
      absl::MutexLock lock(&mutex_);
      context_ = driver_->AllocateContext();
      // Reference transferred to the completion callback, which adopts it.
      intrusive_ptr_increment(this);
      stub_->async()->ListObjects(
          context_.get(), &request, &response,
          WithExecutor(driver_->executor(), [this](::grpc::Status s) {
            internal::IntrusivePtr<ListTask> self(this,
                                                  internal::adopt_object_ref);
            self->ListFinished(GrpcStatusToAbslStatus(s));
          }));
    }
  }

  // Handles one page: forwards in-range entries, stops early past
  // exclusive_max, and requests the next page if a token was returned.
  void ListFinished(absl::Status status) {
    if (is_cancelled()) {
      execution::set_done(receiver_);
      return;
    }
    if (!status.ok() && IsRetriable(status)) {
      status =
          driver_->BackoffForAttemptAsync(std::move(status), attempt_++, this);
      if (status.ok()) {
        return;
      }
    }
    if (!status.ok()) {
      execution::set_error(receiver_, std::move(status));
      return;
    }
    bool done = false;
    for (const auto& o : response.objects()) {
      if (is_cancelled()) {
        done = true;
        break;
      }
      std::string_view name = o.name();
      if (!Contains(options_.range, name)) {
        // Results are ordered, so once past exclusive_max no later entry
        // can match.
        if (KeyRange::CompareKeyAndExclusiveMax(
                name, options_.range.exclusive_max) >= 0) {
          done = true;
          break;
        }
        continue;
      }
      if (options_.strip_prefix_length) {
        name = name.substr(options_.strip_prefix_length);
      }
      execution::set_value(receiver_, ListEntry{
                                          std::string(name),
                                          ListEntry::checked_size(o.size()),
                                      });
    }
    if (!done && !response.next_page_token().empty()) {
      request.set_page_token(response.next_page_token());
      response.Clear();
      // Each page gets a fresh retry budget.
      attempt_ = 0;
      Retry();
      return;
    }
    execution::set_done(receiver_);
  }
};
// List receiver used by DeleteRange: issues an unconditional Delete for each
// listed key and links any failure into `promise_`.
struct DeleteRangeListReceiver {
  internal::IntrusivePtr<GcsGrpcKeyValueStore> driver_;
  Promise<void> promise_;
  FutureCallbackRegistration cancel_registration_;

  void set_starting(AnyCancelReceiver cancel) {
    // Cancel the listing if the caller abandons the future.
    cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
  }

  void set_value(ListEntry entry) {
    assert(!entry.key.empty());
    // Guarded again in release builds where the assert is compiled out.
    if (!entry.key.empty()) {
      LinkError(promise_, driver_->Delete(std::move(entry.key)));
    }
  }

  void set_error(absl::Status error) {
    SetDeferredResult(promise_, std::move(error));
    promise_ = Promise<void>();
  }

  // Releasing the promise lets it resolve once all linked deletes finish.
  void set_done() { promise_ = Promise<void>(); }

  void set_stopping() {
    cancel_registration_.Unregister();
    driver_ = {};
  }
};
// Validates the key and generation conditions, then routes the read through
// the generic byte-range coalescing batch machinery (which calls ReadImpl).
Future<kvstore::ReadResult> GcsGrpcKeyValueStore::Read(Key key,
                                                       ReadOptions options) {
  gcs_grpc_metrics.read.Increment();
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid blob object name");
  }
  if (!IsValidStorageGeneration(options.generation_conditions.if_equal) ||
      !IsValidStorageGeneration(options.generation_conditions.if_not_equal)) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
      *this, std::move(key), std::move(options));
}
// Issues one coalesced read by spinning up a ReadTask; the task keeps itself
// alive via its reference count until the promise is resolved.
Future<kvstore::ReadResult> GcsGrpcKeyValueStore::ReadImpl(
    Key&& key, ReadOptions&& options) {
  gcs_grpc_metrics.batch_read.Increment();
  auto op = PromiseFuturePair<ReadResult>::Make();
  auto task = internal::MakeIntrusivePtr<ReadTask>();
  task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
  task->options_ = std::move(options);
  task->promise_ = std::move(op.promise);
  task->Start(key);
  return std::move(op.future);
}
// Writes `value` to `key`, or deletes `key` when `value` is std::nullopt.
// Validation mirrors Read(); the work is delegated to a WriteTask or
// DeleteTask which owns the retry loop.
Future<TimestampedStorageGeneration> GcsGrpcKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  gcs_grpc_metrics.write.Increment();
  if (!IsValidObjectName(key)) {
    return absl::InvalidArgumentError("Invalid blob object name");
  }
  if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
    return absl::InvalidArgumentError("Malformed StorageGeneration");
  }
  auto op = PromiseFuturePair<TimestampedStorageGeneration>::Make();
  if (!value) {
    // nullopt value => conditional delete.
    auto task = internal::MakeIntrusivePtr<DeleteTask>();
    task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
    task->options_ = std::move(options);
    task->promise_ = std::move(op.promise);
    task->Start(key);
  } else {
    auto task = internal::MakeIntrusivePtr<WriteTask>();
    task->driver_ = internal::IntrusivePtr<GcsGrpcKeyValueStore>(this);
    task->options_ = std::move(options);
    task->promise_ = std::move(op.promise);
    task->Start(key, *std::move(value));
  }
  return std::move(op.future);
}
// Lists keys in `options.range`; an empty range short-circuits through the
// receiver protocol without issuing any RPC.
void GcsGrpcKeyValueStore::ListImpl(ListOptions options,
                                    ListReceiver receiver) {
  gcs_grpc_metrics.list.Increment();
  if (options.range.empty()) {
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  auto task = internal::MakeIntrusivePtr<ListTask>(
      internal::IntrusivePtr<GcsGrpcKeyValueStore>(this), std::move(options),
      std::move(receiver));
  task->Start();
}
// Deletes all keys in `range` by listing the range and issuing one Delete per
// entry via DeleteRangeListReceiver (no server-side bulk delete is used).
Future<const void> GcsGrpcKeyValueStore::DeleteRange(KeyRange range) {
  gcs_grpc_metrics.delete_range.Increment();
  if (range.empty()) return absl::OkStatus();
  auto op = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
  ListOptions list_options;
  list_options.range = std::move(range);
  ListImpl(list_options, DeleteRangeListReceiver{
                             internal::IntrusivePtr<GcsGrpcKeyValueStore>(this),
                             std::move(op.promise)});
  return std::move(op.future);
}
// Opens the driver: resolves the endpoint, builds/reuses the shared stub
// pool, and optionally blocks until at least one channel is connected.
Future<kvstore::DriverPtr> GcsGrpcKeyValueStoreSpec::DoOpen() const {
  auto driver = internal::MakeIntrusivePtr<GcsGrpcKeyValueStore>();
  driver->spec_ = data_;
  driver->bucket_ = absl::StrFormat("projects/_/buckets/%s", data_.bucket);
  std::string endpoint = data_.endpoint;
  if (endpoint.empty()) {
    // NOTE(review): this string literal was truncated in this copy of the
    // file; reconstructed as the default production storage endpoint.
    endpoint = "dns:///storage.googleapis.com";
  }
  auto channel_credentials =
      GetCredentialsForEndpoint(endpoint, driver->call_credentials_fn_);
  driver->storage_stub_pool_ = GetSharedStorageStubPool(
      endpoint, data_.num_channels, std::move(channel_credentials));
  if (driver->spec_.wait_for_connection > absl::ZeroDuration()) {
    driver->storage_stub_pool_->WaitForConnected(
        driver->spec_.wait_for_connection);
  }
  return driver;
}
// Parses a "gcs_grpc" URL into a kvstore spec.
//
// The authority component is the bucket name; the path, minus its leading
// '/', is percent-decoded and becomes the key prefix.  Query strings and
// fragment identifiers are rejected.
Result<kvstore::Spec> ParseGcsGrpcUrl(std::string_view url) {
  auto parsed = internal::ParseGenericUri(url);
  assert(parsed.scheme == kUriScheme);
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  if (!IsValidBucketName(parsed.authority)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid GCS bucket name: ", QuoteString(parsed.authority)));
  }
  // An absent path maps to an empty key prefix.
  std::string decoded_path;
  if (!parsed.path.empty()) {
    decoded_path = internal::PercentDecode(parsed.path.substr(1));
  }
  auto driver_spec = internal::MakeIntrusivePtr<GcsGrpcKeyValueStoreSpec>();
  driver_spec->data_.bucket = std::string(parsed.authority);
  driver_spec->data_.user_project =
      Context::Resource<GcsUserProjectResource>::DefaultSpec();
  driver_spec->data_.retries =
      Context::Resource<internal_storage_gcs::GcsRequestRetries>::DefaultSpec();
  driver_spec->data_.data_copy_concurrency =
      Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
  return {std::in_place, std::move(driver_spec), std::move(decoded_path)};
}
}
}
// The driver holds no references to garbage-collected objects, so no
// garbage-collection support is required.
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
    tensorstore::GcsGrpcKeyValueStore)
namespace {
// Registers the "gcs_grpc" driver so it can be opened from a JSON spec.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::GcsGrpcKeyValueStoreSpec>
    registration;
// Registers URL-based opening for the driver's scheme (see ParseGcsGrpcUrl).
const tensorstore::internal_kvstore::UrlSchemeRegistration
    url_scheme_registration{kUriScheme, tensorstore::ParseGcsGrpcUrl};
} | #include <stddef.h>
#include <cstring>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/grpc/grpc_mock.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/gcs_grpc/mock_storage_service.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "google/storage/v2/storage.pb.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::Context;
using ::tensorstore::KeyRange;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::ParseTextProtoOrDie;
using ::tensorstore::StorageGeneration;
using ::tensorstore::grpc_mocker::MockGrpcServer;
using ::tensorstore::internal::AbslStatusToGrpcStatus;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore_grpc::MockStorage;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::google::storage::v2::DeleteObjectRequest;
using ::google::storage::v2::ListObjectsRequest;
using ::google::storage::v2::ListObjectsResponse;
using ::google::storage::v2::ReadObjectRequest;
using ::google::storage::v2::ReadObjectResponse;
using ::google::storage::v2::WriteObjectRequest;
using ::google::storage::v2::WriteObjectResponse;
// Test fixture that runs an in-process gMock implementation of the GCS
// gRPC storage service and opens a "gcs_grpc" kvstore pointed at it.
class GcsGrpcTest : public testing::Test {
 public:
  // Opens a kvstore backed by the mock server.  The short timeout keeps
  // failure paths in tests fast.
  tensorstore::KvStore OpenStore() {
    ABSL_LOG(INFO) << "Using " << mock_service_.server_address();
    return kvstore::Open({{"driver", "gcs_grpc"},
                          {"endpoint", mock_service_.server_address()},
                          {"bucket", "bucket"},
                          {"timeout", "100ms"}})
        .value();
  }
  // Accessor for the mock service, used by tests to set expectations.
  MockStorage& mock() { return *mock_service_.service(); }
  tensorstore::grpc_mocker::MockGrpcServer<MockStorage> mock_service_;
};
TEST_F(GcsGrpcTest, Read) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
auto start = absl::Now();
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, kvstore::Read(store, expected_request.object()).result());
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value, "1234");
EXPECT_GT(result.stamp.time, start);
EXPECT_EQ(result.stamp.generation, StorageGeneration::FromUint64(2));
}
TEST_F(GcsGrpcTest, ReadRetry) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
::testing::Sequence s1;
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(2)
.InSequence(s1)
.WillRepeatedly(testing::Return(
AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""))));
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.InSequence(s1)
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
auto start = absl::Now();
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, kvstore::Read(store, expected_request.object()).result());
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value, "1234");
EXPECT_GT(result.stamp.time, start);
EXPECT_EQ(result.stamp.generation, StorageGeneration::FromUint64(2));
}
TEST_F(GcsGrpcTest, ReadWithOptions) {
ReadObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_not_match: 3
if_generation_match: 1
read_offset: 1
read_limit: 9
)pb");
ReadObjectResponse response = ParseTextProtoOrDie(R"pb(
metadata { generation: 2 }
checksummed_data { content: '1234' }
)pb");
EXPECT_CALL(mock(), ReadObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, auto*,
grpc::ServerWriter<ReadObjectResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal = StorageGeneration::FromUint64(3);
options.generation_conditions.if_equal = StorageGeneration::FromUint64(1);
options.staleness_bound = absl::InfiniteFuture();
options.byte_range = OptionalByteRangeRequest{1, 10};
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result,
kvstore::Read(store, expected_request.object(), options).result());
}
TEST_F(GcsGrpcTest, Write) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject(_, _, _))
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"))
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteRetry) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: 'bucket' generation: 1 }
)pb");
::testing::Sequence s1;
EXPECT_CALL(mock(), WriteObject)
.InSequence(s1)
.WillOnce(testing::Return(
AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""))));
EXPECT_CALL(mock(), WriteObject)
.InSequence(s1)
.WillOnce(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
if (reader->Read(&req)) {
requests.push_back(req);
}
return AbslStatusToGrpcStatus(absl::ResourceExhaustedError(""));
}));
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.InSequence(s1)
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"))
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(2)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteEmpty) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: 'projects/_/buckets/bucket' generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord()).result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
object_size: 0
}
checksummed_data { crc32c: 0 }
object_checksums { crc32c: 0 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteWithOptions) {
std::vector<WriteObjectRequest> requests;
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'abc' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), absl::Cord("abcd"),
{StorageGeneration::FromUint64(3)})
.result());
EXPECT_THAT(
requests,
testing::AllOf(
testing::SizeIs(testing::Ge(1)),
testing::Each(EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "abc" bucket: "projects/_/buckets/bucket" }
if_generation_match: 3
object_size: 4
}
checksummed_data { content: "abcd" crc32c: 2462583345 }
object_checksums { crc32c: 2462583345 }
finish_write: true
write_offset: 0
)pb"))));
}
TEST_F(GcsGrpcTest, WriteMultipleRequests) {
WriteObjectResponse response = ParseTextProtoOrDie(R"pb(
resource { name: 'bigly' bucket: "projects/_/buckets/bucket" generation: 1 }
)pb");
std::vector<WriteObjectRequest> requests;
EXPECT_CALL(mock(), WriteObject)
.Times(AtLeast(1))
.WillRepeatedly(testing::Invoke(
[&](auto*, grpc::ServerReader<WriteObjectRequest>* reader,
auto* resp) -> ::grpc::Status {
WriteObjectRequest req;
while (reader->Read(&req)) {
size_t len = req.checksummed_data().content().size();
req.mutable_checksummed_data()->set_content(
absl::StrFormat("size: %d", len));
requests.push_back(std::move(req));
}
resp->CopyFrom(response);
return grpc::Status::OK;
}));
FlatCordBuilder cord_builder(16 + 2 * 1048576);
memset(cord_builder.data(), 0x37, cord_builder.size());
absl::Cord data = std::move(cord_builder).Build();
data.Append("abcd");
EXPECT_EQ(data.size(), 2097172);
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, response.resource().name(), data).result());
ASSERT_THAT(requests, testing::SizeIs(testing::Ge(2)));
EXPECT_THAT(
tensorstore::span(&requests[requests.size() - 2], 2),
testing::ElementsAre(
EqualsProto<WriteObjectRequest>(R"pb(
write_object_spec {
resource { name: "bigly" bucket: "projects/_/buckets/bucket" }
object_size: 2097172
}
checksummed_data { content: "size: 2097152", crc32c: 2470751355 }
write_offset: 0
)pb"),
EqualsProto<WriteObjectRequest>(R"pb(
checksummed_data { content: "size: 20", crc32c: 2394860217 }
object_checksums { crc32c: 1181131586 }
finish_write: true
write_offset: 2097152
)pb")));
}
TEST_F(GcsGrpcTest, WriteNullopt) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_match: 0
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Write(store, expected_request.object(), std::nullopt,
{StorageGeneration::NoValue()})
.result());
}
TEST_F(GcsGrpcTest, Delete) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation,
kvstore::Delete(store, expected_request.object()).result());
}
TEST_F(GcsGrpcTest, DeleteWithOptions) {
DeleteObjectRequest expected_request = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'abc'
if_generation_match: 2
)pb");
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(expected_request), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto generation, kvstore::Delete(store, expected_request.object(),
{StorageGeneration::FromUint64(2)})
.result());
}
TEST_F(GcsGrpcTest, DeleteRange) {
ListObjectsRequest request1 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
lexicographic_start: 'a/c'
lexicographic_end: 'a/d'
)pb");
ListObjectsResponse response1 = ParseTextProtoOrDie(R"pb(
objects { name: 'a/c' }
objects { name: 'a/ce' }
)pb");
DeleteObjectRequest request2 = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'a/c'
)pb");
DeleteObjectRequest request3 = ParseTextProtoOrDie(R"pb(
bucket: 'projects/_/buckets/bucket'
object: 'a/ce'
)pb");
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request1), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response1), Return(grpc::Status::OK)));
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(request2), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
EXPECT_CALL(mock(), DeleteObject(_, EqualsProto(request3), _))
.Times(AtLeast(1))
.WillRepeatedly(Return(grpc::Status::OK));
auto store = OpenStore();
TENSORSTORE_EXPECT_OK(
kvstore::DeleteRange(store, KeyRange::Prefix("a/c")).result());
}
TEST_F(GcsGrpcTest, List) {
ListObjectsRequest request1 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
)pb");
ListObjectsRequest request2 = ParseTextProtoOrDie(R"pb(
parent: 'projects/_/buckets/bucket'
page_size: 1000
page_token: 'next-page-token'
)pb");
ListObjectsResponse response1 = ParseTextProtoOrDie(R"pb(
objects { name: 'a' }
objects { name: 'b' }
next_page_token: 'next-page-token'
)pb");
ListObjectsResponse response2 = ParseTextProtoOrDie(R"pb(
objects { name: 'c' }
)pb");
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request1), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response1), Return(grpc::Status::OK)));
EXPECT_CALL(mock(), ListObjects(_, EqualsProto(request2), _))
.Times(AtLeast(1))
.WillRepeatedly(
DoAll(SetArgPointee<2>(response2), Return(grpc::Status::OK)));
auto store = OpenStore();
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
¬ification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a", "set_value: b",
"set_value: c", "set_done", "set_stopping"));
}
TEST(GcsGrpcSpecTest, InvalidSpec) {
auto context = Context::Default();
EXPECT_THAT(kvstore::Open({{"driver", "gcs_grpc"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", "gcs_grpc"}, {"bucket", "bucket:xyz"}}, context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "a\tb"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsGrpcUrlTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "abc"}},
"gcs_grpc:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "gcs_grpc"}, {"bucket", "my-bucket"}, {"path", "abc def"}},
"gcs_grpc:
}
TEST(GcsGrpcUrlTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Invalid GCS bucket name: \"bucket:xyz\""));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gcs_grpc:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_grpc/gcs_grpc.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_grpc/gcs_grpc_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
dc8ace3e-4756-4385-8a98-8b67b7737979 | cpp | google/tensorstore | zip_dir_cache | tensorstore/kvstore/zip/zip_dir_cache.cc | tensorstore/kvstore/zip/zip_dir_cache_test.cc | #include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/compression/zip_details.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
namespace tensorstore {
namespace internal_zip_kvstore {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip");
// Asynchronously reads and decodes the central directory of a ZIP archive
// for ZipDirectoryCache.  The read proceeds in up to three stages:
//   1. Read a block from the end of the file expected to contain the EOCD
//      (end-of-central-directory) record.
//   2. If the central directory lies outside that block, issue a second
//      generation-conditioned read covering exactly the directory.
//   3. Decode the central-directory entries into a sorted Directory and
//      publish it to the cache entry.
struct ReadDirectoryOp
    : public internal::AtomicReferenceCount<ReadDirectoryOp> {
  // Cache entry whose read request is being satisfied; not owned.
  ZipDirectoryCache::Entry* entry_;
  // Previously cached directory, if any; reused on an `aborted` read
  // (unchanged generation).
  std::shared_ptr<const Directory> existing_read_data_;
  kvstore::ReadOptions options_;
  // Parsed EOCD record, populated in DoDecodeEOCDBlock.
  internal_zip::ZipEOCD eocd_;

  // Stage 1: issue the (suffix) read expected to contain the EOCD block.
  void StartEOCDBlockRead() {
    auto& cache = internal::GetOwningCache(*entry_);
    ABSL_LOG_IF(INFO, zip_logging)
        << "StartEOCDBlockRead " << entry_->key() << " " << options_.byte_range;
    auto future =
        cache.kvstore_driver_->Read(std::string(entry_->key()), options_);
    future.Force();
    future.ExecuteWhenReady(
        [self = internal::IntrusivePtr<ReadDirectoryOp>(this)](
            ReadyFuture<kvstore::ReadResult> ready) {
          self->OnEOCDBlockRead(std::move(ready));
        });
  }

  // Handles completion of the EOCD block read.
  void OnEOCDBlockRead(ReadyFuture<kvstore::ReadResult> ready) {
    auto& r = ready.result();
    if (!r.ok()) {
      ABSL_LOG_IF(INFO, zip_logging) << r.status();
      if (absl::IsOutOfRange(r.status())) {
        // The file is smaller than the requested suffix; retry with an
        // unconstrained (full) read.
        assert(!options_.byte_range.IsFull());
        options_.byte_range = OptionalByteRangeRequest{};
        StartEOCDBlockRead();
        return;
      }
      entry_->ReadError(
          internal::ConvertInvalidArgumentToFailedPrecondition(r.status()));
      return;
    }
    auto& read_result = *r;
    if (read_result.aborted()) {
      // Generation unchanged; keep the previously cached directory.
      entry_->ReadSuccess(ZipDirectoryCache::ReadState{
          entry_->read_request_state_.read_state.data,
          std::move(read_result.stamp)});
      return;
    }
    if (read_result.not_found()) {
      entry_->ReadError(absl::NotFoundError(""));
      return;
    }
    // Decode off the completion thread, on the cache's executor.
    GetOwningCache(*entry_).executor()(
        [self = internal::IntrusivePtr<ReadDirectoryOp>(this),
         ready = std::move(ready)]() {
          self->DoDecodeEOCDBlock(std::move(ready));
        });
  }

  // Stage 2: locate and parse the EOCD record within the block just read.
  void DoDecodeEOCDBlock(ReadyFuture<kvstore::ReadResult> ready) {
    absl::Cord* eocd_block = &ready.value().value;
    riegeli::CordReader<absl::Cord*> reader(eocd_block);
    // File offset corresponding to the start of `eocd_block`.
    int64_t block_offset =
        options_.byte_range.IsFull() ? 0 : options_.byte_range.inclusive_min;
    auto read_eocd_variant = TryReadFullEOCD(reader, eocd_, block_offset);
    if (auto* status = std::get_if<absl::Status>(&read_eocd_variant);
        status != nullptr && !status->ok()) {
      entry_->ReadError(std::move(*status));
      return;
    }
    if (auto* inclusive_min = std::get_if<int64_t>(&read_eocd_variant);
        inclusive_min != nullptr) {
      // The EOCD needs more trailing bytes (e.g. a ZIP64 locator); retry
      // with the suggested larger suffix.
      assert(!options_.byte_range.IsFull());
      options_.byte_range = OptionalByteRangeRequest::Suffix(*inclusive_min);
      StartEOCDBlockRead();
      return;
    }
    if (block_offset >= 0 && block_offset <= eocd_.cd_offset) {
      // The central directory is already contained in the current block.
      DoDecodeDirectory(std::move(ready), eocd_.cd_offset - block_offset);
      return;
    }
    // Issue a second read covering exactly the central directory,
    // conditioned on the generation observed above so that a concurrent
    // overwrite of the archive is detected rather than silently mixed.
    kvstore::ReadOptions other_options = options_;
    other_options.generation_conditions.if_equal =
        ready.value().stamp.generation;
    other_options.byte_range = OptionalByteRangeRequest::Range(
        eocd_.cd_offset, eocd_.cd_offset + eocd_.cd_size);
    auto& cache = internal::GetOwningCache(*entry_);
    auto future =
        cache.kvstore_driver_->Read(std::string(entry_->key()), other_options);
    future.Force();
    future.ExecuteWhenReady(
        [self = internal::IntrusivePtr<ReadDirectoryOp>(this)](
            ReadyFuture<kvstore::ReadResult> ready) {
          self->OnDirectoryBlockRead(std::move(ready));
        });
  }

  // Handles completion of the dedicated central-directory read.
  void OnDirectoryBlockRead(ReadyFuture<kvstore::ReadResult> ready) {
    auto& r = ready.result();
    if (!r.ok()) {
      ABSL_LOG_IF(INFO, zip_logging) << r.status();
      entry_->ReadError(
          internal::ConvertInvalidArgumentToFailedPrecondition(r.status()));
      return;
    }
    auto& read_result = *r;
    if (read_result.aborted() || read_result.not_found() ||
        !ready.value().has_value()) {
      // The conditional read did not return data, e.g. the archive changed
      // between stages.  (Fixed typo in message: was "Faild".)
      entry_->ReadError(
          absl::InvalidArgumentError("Failed to read ZIP directory"));
      return;
    }
    GetOwningCache(*entry_).executor()(
        [self = internal::IntrusivePtr<ReadDirectoryOp>(this),
         ready = std::move(ready)]() {
          self->DoDecodeDirectory(std::move(ready), 0);
        });
  }

  // Stage 3: parse central-directory entries starting at `seek_pos` within
  // the block held by `ready`, then publish the resulting Directory.
  void DoDecodeDirectory(ReadyFuture<kvstore::ReadResult> ready,
                         size_t seek_pos) {
    absl::Cord* cd_block = &ready.value().value;
    riegeli::CordReader<absl::Cord*> reader(cd_block);
    if (seek_pos > 0) {
      reader.Seek(seek_pos);
    }
    Directory dir{};
    dir.full_read = options_.byte_range.IsFull();
    dir.entries.reserve(eocd_.num_entries);
    for (size_t i = 0; i < eocd_.num_entries; ++i) {
      internal_zip::ZipEntry entry{};
      if (auto entry_status = ReadCentralDirectoryEntry(reader, entry);
          !entry_status.ok()) {
        entry_->ReadError(entry_status);
        return;
      }
      // Unsupported entries (e.g. unsupported compression method) are
      // skipped rather than failing the whole directory.
      if (ValidateEntryIsSupported(entry).ok()) {
        ABSL_LOG_IF(INFO, zip_logging) << "Adding " << entry;
        dir.entries.push_back(
            Directory::Entry{entry.filename, entry.crc, entry.compressed_size,
                             entry.uncompressed_size, entry.local_header_offset,
                             entry.estimated_read_size});
      } else {
        ABSL_LOG_IF(INFO, zip_logging) << "Skipping " << entry;
      }
    }
    // Sort by file offset first so each entry's on-disk extent can be
    // estimated as the distance to the next local header (or to the start
    // of the central directory for the last entry).
    std::sort(dir.entries.begin(), dir.entries.end(),
              [](const auto& a, const auto& b) {
                return std::tie(a.local_header_offset, a.filename) <
                       std::tie(b.local_header_offset, b.filename);
              });
    auto last_header_offset = eocd_.cd_offset;
    for (auto it = dir.entries.rbegin(); it != dir.entries.rend(); ++it) {
      it->estimated_size = last_header_offset - it->local_header_offset;
      last_header_offset = it->local_header_offset;
    }
    // Re-sort by filename for lookup.  (Fixed: the tie-breaker previously
    // compared `a.local_header_offset` on both sides, so duplicate
    // filenames were left in unspecified relative order.)
    std::sort(dir.entries.begin(), dir.entries.end(),
              [](const auto& a, const auto& b) {
                return std::tie(a.filename, a.local_header_offset) <
                       std::tie(b.filename, b.local_header_offset);
              });
    ABSL_LOG_IF(INFO, zip_logging) << dir;
    entry_->ReadSuccess(ZipDirectoryCache::ReadState{
        std::make_shared<const Directory>(std::move(dir)),
        std::move(ready.value().stamp)});
  }
};
}
// Reports the heap footprint of the cached Directory so the cache pool can
// account for it when enforcing memory limits.
size_t ZipDirectoryCache::Entry::ComputeReadDataSizeInBytes(
    const void* read_data) {
  return internal::EstimateHeapUsage(*static_cast<const ReadData*>(read_data));
}
// Starts an asynchronous (re)read of this entry's ZIP directory.
//
// If a prior read retrieved the entire file, the whole file is re-read;
// otherwise only the trailing EOCD-sized suffix is requested.  The read is
// conditioned on the cached generation (`if_not_equal`) so that an
// unchanged file produces an `aborted` result and the cached directory is
// retained.
void ZipDirectoryCache::Entry::DoRead(AsyncCacheReadRequest request) {
  auto state = internal::MakeIntrusivePtr<ReadDirectoryOp>();
  state->entry_ = this;
  {
    // Snapshot the current cached data and generation under the read lock.
    ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*this);
    state->existing_read_data_ = lock.shared_data();
    state->options_.generation_conditions.if_not_equal =
        lock.read_state().stamp.generation;
  }
  state->options_.staleness_bound = request.staleness_bound;
  if (state->existing_read_data_ && state->existing_read_data_->full_read) {
    state->options_.byte_range = OptionalByteRangeRequest{};
  } else {
    state->options_.byte_range =
        OptionalByteRangeRequest::SuffixLength(internal_zip::kEOCDBlockSize);
  }
  state->StartEOCDBlockRead();
}
// Allocates a cache entry; required AsyncCache virtual.
ZipDirectoryCache::Entry* ZipDirectoryCache::DoAllocateEntry() {
  return new Entry;
}
// Reports the per-entry size for cache accounting; required AsyncCache
// virtual.
size_t ZipDirectoryCache::DoGetSizeofEntry() { return sizeof(Entry); }
}
} | #include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Context;
using ::tensorstore::InlineExecutor;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal_zip_kvstore::Directory;
using ::tensorstore::internal_zip_kvstore::ZipDirectoryCache;
using ::tensorstore::kvstore::DriverPtr;
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
// Reads the test ZIP archive named by --tensorstore_test_data into a Cord.
// The exact byte-count check guards against the flag pointing at the wrong
// file.
absl::Cord GetTestZipFileData() {
  ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
  absl::Cord filedata;
  TENSORSTORE_CHECK_OK(riegeli::ReadAll(
      riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
  ABSL_CHECK_EQ(filedata.size(), 319482);
  return filedata;
}
TEST(ZipDirectoryKvsTest, Basic) {
auto context = Context::Default();
auto pool = CachePool::Make(CachePool::Limits{});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
ASSERT_THAT(
tensorstore::kvstore::Write(memory, "data.zip", GetTestZipFileData())
.result(),
::tensorstore::IsOk());
auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
});
auto entry = GetCacheEntry(cache, "data.zip");
auto status = entry->Read({absl::InfinitePast()}).status();
ASSERT_THAT(status, ::tensorstore::IsOk());
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*entry);
auto* dir = lock.data();
ASSERT_THAT(dir, ::testing::NotNull());
ASSERT_THAT(dir->entries, ::testing::SizeIs(3));
EXPECT_THAT(dir->entries[0].filename, "data/a.png");
EXPECT_THAT(dir->entries[1].filename, "data/bb.png");
EXPECT_THAT(dir->entries[2].filename, "data/c.png");
}
// Reading the directory of a key absent from the backing store must surface
// kNotFound rather than a decode error.
TEST(ZipDirectoryKvsTest, MissingEntry) {
  auto context = Context::Default();
  auto pool = CachePool::Make(CachePool::Limits{});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      tensorstore::KvStore memory,
      tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
  auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
    return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
  });
  // "data.zip" was never written to the memory kvstore.
  auto entry = GetCacheEntry(cache, "data.zip");
  auto status = entry->Read({absl::InfinitePast()}).status();
  EXPECT_THAT(status, ::tensorstore::StatusIs(absl::StatusCode::kNotFound));
}
static constexpr unsigned char kZipTest2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54,
0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x04, 0x00, 0x64, 0x00, 0x14, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98,
0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x9a,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x0d, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03,
0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00,
0xe8, 0x03, 0x64, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x3c, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0x09,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x77, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55,
0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
0xca, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
};
TEST(ZipDirectoryKvsTest, MinimalZip) {
auto context = Context::Default();
auto pool = CachePool::Make(CachePool::Limits{});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
ASSERT_THAT(tensorstore::kvstore::Write(
memory, "data.zip",
absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2)),
[](auto) {}))
.result(),
::tensorstore::IsOk());
auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
});
auto entry = GetCacheEntry(cache, "data.zip");
auto status = entry->Read({absl::InfinitePast()}).status();
ASSERT_THAT(status, ::tensorstore::IsOk());
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*entry);
auto* dir = lock.data();
ASSERT_THAT(dir, ::testing::NotNull());
ASSERT_THAT(dir->entries, ::testing::SizeIs(2));
EXPECT_THAT(dir->entries[0].filename, "test");
EXPECT_THAT(dir->entries[1].filename, "testdir/test2");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_dir_cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_dir_cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d5e0aae3-f28b-414d-a342-b691170f8ef7 | cpp | google/tensorstore | zip_key_value_store | tensorstore/kvstore/zip/zip_key_value_store.cc | tensorstore/kvstore/zip/zip_key_value_store_test.cc | #include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/cord_reader.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/compression/zip_details.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "absl/base/attributes.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
using ::tensorstore::internal_zip_kvstore::Directory;
using ::tensorstore::internal_zip_kvstore::ZipDirectoryCache;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip");
auto zip_metrics = TENSORSTORE_KVSTORE_COMMON_READ_METRICS(zip);
struct ZipKvStoreSpecData {
kvstore::Spec base;
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base, x.cache_pool, x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member("base", jb::Projection<&ZipKvStoreSpecData::base>()),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&ZipKvStoreSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<&ZipKvStoreSpecData::data_copy_concurrency>())
);
};
class ZipKvStoreSpec
: public internal_kvstore::RegisteredDriverSpec<ZipKvStoreSpec,
ZipKvStoreSpecData> {
public:
static constexpr char id[] = "zip";
Future<kvstore::DriverPtr> DoOpen() const override;
absl::Status ApplyOptions(kvstore::DriverSpecOptions&& options) override {
return data_.base.driver.Set(std::move(options));
}
Result<kvstore::Spec> GetBase(std::string_view path) const override {
return data_.base;
}
};
class ZipKvStore
: public internal_kvstore::RegisteredDriver<ZipKvStore, ZipKvStoreSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
std::string DescribeKey(std::string_view key) override {
return tensorstore::StrCat(QuoteString(key), " in ",
base_.driver->DescribeKey(base_.path));
}
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status GetBoundSpecData(ZipKvStoreSpecData& spec) const {
spec = spec_data_;
return absl::OkStatus();
}
kvstore::SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return base_.driver->GetSupportedFeatures(KeyRange::Singleton(base_.path));
}
Result<KvStore> GetBase(std::string_view path,
const Transaction& transaction) const override {
return KvStore(base_.driver, base_.path, transaction);
}
const Executor& executor() const {
return spec_data_.data_copy_concurrency->executor;
}
ZipKvStoreSpecData spec_data_;
kvstore::KvStore base_;
internal::PinnedCacheEntry<ZipDirectoryCache> cache_entry_;
};
Future<kvstore::DriverPtr> ZipKvStoreSpec::DoOpen() const {
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const ZipKvStoreSpec>(this)](
kvstore::KvStore& base_kvstore) mutable
-> Result<kvstore::DriverPtr> {
std::string cache_key;
internal::EncodeCacheKey(&cache_key, base_kvstore.driver,
base_kvstore.path,
spec->data_.data_copy_concurrency);
auto& cache_pool = *spec->data_.cache_pool;
auto directory_cache = internal::GetCache<ZipDirectoryCache>(
cache_pool.get(), cache_key, [&] {
return std::make_unique<ZipDirectoryCache>(
base_kvstore.driver,
spec->data_.data_copy_concurrency->executor);
});
auto driver = internal::MakeIntrusivePtr<ZipKvStore>();
driver->base_ = std::move(base_kvstore);
driver->spec_data_ = std::move(spec->data_);
driver->cache_entry_ =
GetCacheEntry(directory_cache, driver->base_.path);
return driver;
},
kvstore::Open(data_.base));
}
struct ReadState : public internal::AtomicReferenceCount<ReadState> {
internal::IntrusivePtr<ZipKvStore> owner_;
kvstore::Key key_;
kvstore::ReadOptions options_;
void OnDirectoryReady(Promise<kvstore::ReadResult> promise) {
TimestampedStorageGeneration stamp;
kvstore::ReadOptions options;
options.staleness_bound = options_.staleness_bound;
options.byte_range = OptionalByteRangeRequest{};
size_t seek_pos = 0;
{
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(
*(owner_->cache_entry_));
stamp = lock.stamp();
assert(lock.data());
const ZipDirectoryCache::ReadData& dir = *lock.data();
ABSL_LOG_IF(INFO, zip_logging) << dir;
auto it = std::lower_bound(
dir.entries.begin(), dir.entries.end(), key_,
[](const auto& e, const std::string& k) { return e.filename < k; });
if (it == dir.entries.end() || it->filename != key_) {
promise.SetResult(kvstore::ReadResult::Missing(std::move(stamp)));
return;
}
if (!options_.generation_conditions.Matches(stamp.generation)) {
promise.SetResult(kvstore::ReadResult::Unspecified(std::move(stamp)));
return;
}
if (dir.full_read) {
seek_pos = it->local_header_offset;
} else {
seek_pos = 0;
options.byte_range = OptionalByteRangeRequest::Range(
it->local_header_offset,
it->local_header_offset + it->estimated_size);
}
}
options.generation_conditions.if_equal = stamp.generation;
Link(WithExecutor(owner_->executor(),
[self = internal::IntrusivePtr<ReadState>(this),
seek_pos](Promise<kvstore::ReadResult> promise,
ReadyFuture<kvstore::ReadResult> ready) {
self->OnValueRead(std::move(promise), std::move(ready),
seek_pos);
}),
std::move(promise),
kvstore::Read(owner_->base_, {}, std::move(options)));
}
void OnValueRead(Promise<kvstore::ReadResult> promise,
ReadyFuture<kvstore::ReadResult> ready, size_t seek_pos) {
if (!promise.result_needed()) return;
if (!ready.status().ok()) {
promise.SetResult(ready.status());
return;
}
internal_zip::ZipEntry local_header{};
auto result = [&]() -> Result<kvstore::ReadResult> {
kvstore::ReadResult read_result = std::move(ready.value());
if (!read_result.has_value()) {
return read_result;
}
absl::Cord source = std::move(read_result.value);
riegeli::CordReader reader(&source);
reader.Seek(seek_pos);
TENSORSTORE_RETURN_IF_ERROR(ReadLocalEntry(reader, local_header));
TENSORSTORE_RETURN_IF_ERROR(ValidateEntryIsSupported(local_header));
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range,
options_.byte_range.Validate(local_header.uncompressed_size));
TENSORSTORE_ASSIGN_OR_RETURN(
auto entry_reader, internal_zip::GetReader(&reader, local_header));
if (byte_range.inclusive_min > 0) {
entry_reader->Skip(byte_range.inclusive_min);
}
if (!entry_reader->Read(byte_range.size(), read_result.value)) {
if (entry_reader->status().ok()) {
return absl::OutOfRangeError("Failed to read range");
}
return entry_reader->status();
}
return read_result;
}();
ABSL_LOG_IF(INFO, zip_logging && !result.ok()) << result.status() << "\n"
<< local_header;
promise.SetResult(std::move(result));
}
};
Future<kvstore::ReadResult> ZipKvStore::Read(Key key, ReadOptions options) {
auto state = internal::MakeIntrusivePtr<ReadState>();
state->owner_ = internal::IntrusivePtr<ZipKvStore>(this);
state->key_ = std::move(key);
state->options_ = options;
zip_metrics.read.Increment();
return PromiseFuturePair<kvstore::ReadResult>::LinkValue(
WithExecutor(
executor(),
[state = std::move(state)](Promise<ReadResult> promise,
ReadyFuture<const void>) {
if (!promise.result_needed()) return;
state->OnDirectoryReady(std::move(promise));
}),
cache_entry_->Read({options.staleness_bound}))
.future;
}
struct ListState : public internal::AtomicReferenceCount<ListState> {
internal::IntrusivePtr<ZipKvStore> owner_;
kvstore::ListOptions options_;
ListReceiver receiver_;
Promise<void> promise_;
Future<void> future_;
ListState(internal::IntrusivePtr<ZipKvStore>&& owner,
kvstore::ListOptions&& options, ListReceiver&& receiver)
: owner_(std::move(owner)),
options_(std::move(options)),
receiver_(std::move(receiver)) {
auto [promise, future] = PromiseFuturePair<void>::Make(MakeResult());
this->promise_ = std::move(promise);
this->future_ = std::move(future);
future_.Force();
execution::set_starting(receiver_, [promise = promise_] {
promise.SetResult(absl::CancelledError(""));
});
}
~ListState() {
auto& r = promise_.raw_result();
if (r.ok()) {
execution::set_done(receiver_);
} else {
execution::set_error(receiver_, r.status());
}
execution::set_stopping(receiver_);
}
void OnDirectoryReady() {
auto dir = ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData>(
*(owner_->cache_entry_))
.shared_data();
assert(dir);
auto it = std::lower_bound(
dir->entries.begin(), dir->entries.end(), options_.range.inclusive_min,
[](const auto& e, const std::string& k) { return e.filename < k; });
for (; it != dir->entries.end(); ++it) {
if (KeyRange::CompareKeyAndExclusiveMax(
it->filename, options_.range.exclusive_max) >= 0) {
break;
}
if (it->filename.size() >= options_.strip_prefix_length) {
execution::set_value(
receiver_,
ListEntry{it->filename.substr(options_.strip_prefix_length),
ListEntry::checked_size(it->uncompressed_size)});
}
}
}
};
void ZipKvStore::ListImpl(ListOptions options, ListReceiver receiver) {
auto state = internal::MakeIntrusivePtr<ListState>(
internal::IntrusivePtr<ZipKvStore>(this), std::move(options),
std::move(receiver));
auto* state_ptr = state.get();
zip_metrics.list.Increment();
LinkValue(WithExecutor(executor(),
[state = std::move(state)](Promise<void> promise,
ReadyFuture<const void>) {
state->OnDirectoryReady();
}),
state_ptr->promise_,
cache_entry_->Read({state_ptr->options_.staleness_bound}));
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::ZipKvStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::ZipKvStoreSpec>
registration;
} | #include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/context.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
static constexpr unsigned char kReadOpZip[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x5b,
0x19, 0x57, 0x93, 0xc0, 0x3a, 0x94, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00,
0x00, 0x00, 0x03, 0x00, 0x1c, 0x00, 0x6b, 0x65, 0x79, 0x55, 0x54, 0x09,
0x00, 0x03, 0x1b, 0xf3, 0xe8, 0x64, 0x1c, 0xf3, 0xe8, 0x64, 0x75, 0x78,
0x0b, 0x00, 0x01, 0x04, 0x6c, 0x35, 0x00, 0x00, 0x04, 0x53, 0x5f, 0x01,
0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a,
0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x5b, 0x19, 0x57, 0x93, 0xc0, 0x3a,
0x94, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x81, 0x00,
0x00, 0x00, 0x00, 0x6b, 0x65, 0x79, 0x55, 0x54, 0x05, 0x00, 0x03, 0x1b,
0xf3, 0xe8, 0x64, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0x6c, 0x35, 0x00,
0x00, 0x04, 0x53, 0x5f, 0x01, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x49, 0x00, 0x00, 0x00, 0x4d, 0x00,
0x00, 0x00, 0x00, 0x00,
};
absl::Cord GetReadOpZip() {
return absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(kReadOpZip),
sizeof(kReadOpZip)),
[](auto) {});
}
absl::Cord GetTestZipFileData() {
ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
absl::Cord filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(
riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
ABSL_CHECK_EQ(filedata.size(), 319482);
return filedata;
}
class ZipKeyValueStoreTest : public ::testing::Test {
public:
ZipKeyValueStoreTest() : context_(Context::Default()) {}
void PrepareMemoryKvstore(absl::Cord value) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context_).result());
TENSORSTORE_CHECK_OK(
tensorstore::kvstore::Write(memory, "data.zip", value).result());
}
tensorstore::Context context_;
};
TEST_F(ZipKeyValueStoreTest, Simple) {
PrepareMemoryKvstore(GetTestZipFileData());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "data.zip"}}}},
context_)
.result());
for (int i = 0; i < 2; ++i) {
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
¬ification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: data/a.png",
"set_value: data/bb.png", "set_value: data/c.png",
"set_done", "set_stopping"))
<< i;
}
{
kvstore::ListOptions options;
options.range = options.range.Prefix("data/b");
options.strip_prefix_length = 5;
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, options),
tensorstore::CompletionNotifyingReceiver{
¬ification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: bb.png", "set_done",
"set_stopping"));
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto read_result, kvstore::Read(store, "data/bb.png").result());
EXPECT_THAT(read_result,
MatchesKvsReadResult(
::testing::_,
::testing::Not(tensorstore::StorageGeneration::Unknown())));
EXPECT_THAT(read_result.value.size(), 106351);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto read_result, kvstore::Read(store, "data/zz.png").result());
EXPECT_THAT(read_result, MatchesKvsReadResultNotFound());
}
}
TEST_F(ZipKeyValueStoreTest, ReadOps) {
PrepareMemoryKvstore(GetReadOpZip());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "data.zip"}}}},
context_)
.result());
::tensorstore::internal::TestKeyValueStoreReadOps(
store, "key", absl::Cord("abcdefghijklmnop"), "missing_key");
}
TEST_F(ZipKeyValueStoreTest, InvalidSpec) {
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "zip"}, {"extra", "key"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(ZipKeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.check_data_persists = false;
options.check_write_read = false;
options.check_data_after_serialization = false;
options.check_store_serialization = true;
options.full_spec = {{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "abc.zip"}}}};
options.full_base_spec = {{"driver", "memory"}, {"path", "abc.zip"}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
95e40c78-e8f4-43ff-9fff-211c5845b9a2 | cpp | google/tensorstore | shard_format | tensorstore/kvstore/zarr3_sharding_indexed/shard_format.cc | tensorstore/kvstore/zarr3_sharding_indexed/shard_format_test.cc | #include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/wrapping_writer.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_BINDER(ShardIndexLocationJsonBinder,
jb::Enum<ShardIndexLocation, const char*>({
{ShardIndexLocation::kStart, "start"},
{ShardIndexLocation::kEnd, "end"},
}));
absl::Status ShardIndexEntry::Validate(EntryId entry_id) const {
if (!IsMissing()) {
uint64_t exclusive_max;
if (internal::AddOverflow(offset, length, &exclusive_max) ||
exclusive_max > std::numeric_limits<int64_t>::max()) {
return absl::DataLossError(absl::StrFormat(
"Invalid shard index entry %d with offset=%d, length=%d", entry_id,
offset, length));
}
}
return absl::OkStatus();
}
absl::Status ShardIndexEntry::Validate(EntryId entry_id,
int64_t total_size) const {
if (auto status = Validate(entry_id); !status.ok()) return status;
auto byte_range = AsByteRange();
if (byte_range.exclusive_max > total_size) {
return absl::DataLossError(tensorstore::StrCat(
"Shard index entry ", entry_id, " with byte range ", byte_range,
" is invalid for shard of size ", total_size));
}
return absl::OkStatus();
}
Result<ShardIndex> DecodeShardIndex(const absl::Cord& input,
const ShardIndexParameters& parameters) {
assert(parameters.index_shape.back() == 2);
SharedArray<const void> entries;
TENSORSTORE_ASSIGN_OR_RETURN(
entries,
parameters.index_codec_state->DecodeArray(parameters.index_shape, input));
if (!IsContiguousLayout(entries, c_order)) {
entries = MakeCopy(entries);
}
return ShardIndex{
StaticDataTypeCast<const uint64_t, unchecked>(std::move(entries))};
}
Result<ShardIndex> DecodeShardIndexFromFullShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters) {
int64_t shard_index_size =
shard_index_parameters.index_codec_state->encoded_size();
if (shard_index_size > shard_data.size()) {
return absl::DataLossError(absl::StrFormat(
"Existing shard has size of %d bytes, but expected at least %d bytes",
shard_data.size(), shard_index_size));
}
absl::Cord encoded_shard_index;
switch (shard_index_parameters.index_location) {
case ShardIndexLocation::kStart:
encoded_shard_index = shard_data.Subcord(0, shard_index_size);
break;
case ShardIndexLocation::kEnd:
encoded_shard_index = shard_data.Subcord(
shard_data.size() - shard_index_size, shard_index_size);
break;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndex(encoded_shard_index, shard_index_parameters),
tensorstore::MaybeAnnotateStatus(_, "Error decoding shard index"));
return shard_index;
}
absl::Status EncodeShardIndex(riegeli::Writer& writer,
const ShardIndex& shard_index,
const ShardIndexParameters& parameters) {
riegeli::WrappingWriter wrapping_writer{&writer};
return parameters.index_codec_state->EncodeArray(shard_index.entries,
wrapping_writer);
}
absl::Status ValidateGridShape(span<const Index> grid_shape) {
if (grid_shape.size() > kMaxRank - 1) {
return absl::InvalidArgumentError(
absl::StrFormat("grid rank of %d exceeds maximum of %d",
grid_shape.size(), kMaxRank - 1));
}
if (ProductOfExtents(grid_shape) > kMaxNumEntries) {
return absl::InvalidArgumentError(
tensorstore::StrCat("grid shape of ", grid_shape, " has more than ",
kMaxNumEntries, " entries"));
}
return absl::OkStatus();
}
Result<ZarrCodecChain::Ptr> InitializeIndexCodecChain(
const ZarrCodecChainSpec& codec_chain_spec, DimensionIndex grid_rank,
ZarrCodecChainSpec* resolved_codec_chain_spec) {
if (grid_rank > kMaxRank - 1) {
return absl::InvalidArgumentError(absl::StrFormat(
"Rank of %d exceeds maximum ran of %d supported for sharding_indexed",
grid_rank, kMaxRank - 1));
}
static const uint64_t fill_value{std::numeric_limits<uint64_t>::max()};
internal_zarr3::ArrayCodecResolveParameters array_params;
array_params.dtype = dtype_v<uint64_t>;
array_params.rank = grid_rank + 1;
array_params.fill_value =
SharedArray<const void>(internal::UnownedToShared(&fill_value));
internal_zarr3::BytesCodecResolveParameters bytes_params;
return codec_chain_spec.Resolve(std::move(array_params), bytes_params,
resolved_codec_chain_spec);
}
absl::Status ShardIndexParameters::InitializeIndexShape(
span<const Index> grid_shape) {
TENSORSTORE_RETURN_IF_ERROR(ValidateGridShape(grid_shape));
num_entries = ProductOfExtents(grid_shape);
index_shape.resize(grid_shape.size() + 1);
std::copy(grid_shape.begin(), grid_shape.end(), index_shape.begin());
index_shape.back() = 2;
return absl::OkStatus();
}
absl::Status ShardIndexParameters::Initialize(
const ZarrCodecChainSpec& codec_chain_spec, span<const Index> grid_shape,
ZarrCodecChainSpec* resolved_codec_chain_spec) {
TENSORSTORE_ASSIGN_OR_RETURN(
index_codec_chain,
InitializeIndexCodecChain(codec_chain_spec, grid_shape.size(),
resolved_codec_chain_spec));
return Initialize(*index_codec_chain, grid_shape);
return absl::OkStatus();
}
absl::Status ShardIndexParameters::Initialize(const ZarrCodecChain& codec_chain,
span<const Index> grid_shape) {
if (index_codec_chain.get() != &codec_chain) {
index_codec_chain.reset(&codec_chain);
}
TENSORSTORE_RETURN_IF_ERROR(InitializeIndexShape(grid_shape));
TENSORSTORE_ASSIGN_OR_RETURN(index_codec_state,
index_codec_chain->Prepare(index_shape));
if (index_codec_state->encoded_size() == -1) {
return absl::InvalidArgumentError(
"Invalid index_codecs specified: only fixed-size encodings are "
"supported");
}
return absl::OkStatus();
}
Result<ShardEntries> DecodeShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters) {
const int64_t num_entries = shard_index_parameters.num_entries;
ShardEntries entries;
entries.entries.resize(num_entries);
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndexFromFullShard(shard_data, shard_index_parameters));
for (int64_t i = 0; i < num_entries; ++i) {
const auto entry_index = shard_index[i];
if (entry_index.IsMissing()) continue;
TENSORSTORE_RETURN_IF_ERROR(entry_index.Validate(i, shard_data.size()));
entries.entries[i] =
internal::GetSubCord(shard_data, entry_index.AsByteRange());
}
return entries;
}
Result<std::optional<absl::Cord>> EncodeShard(
const ShardEntries& entries,
const ShardIndexParameters& shard_index_parameters) {
int64_t shard_index_size =
shard_index_parameters.index_codec_state->encoded_size();
absl::Cord shard_data;
riegeli::CordWriter writer{&shard_data};
auto shard_index_array = AllocateArray<uint64_t>(
shard_index_parameters.index_shape, c_order, default_init);
bool has_entry = false;
uint64_t offset =
shard_index_parameters.index_location == ShardIndexLocation::kStart
? shard_index_size
: 0;
for (size_t i = 0; i < entries.entries.size(); ++i) {
const auto& entry = entries.entries[i];
uint64_t entry_offset;
uint64_t length;
if (entry) {
has_entry = true;
length = entry->size();
entry_offset = offset;
offset += length;
ABSL_CHECK(writer.Write(*entry));
} else {
entry_offset = std::numeric_limits<uint64_t>::max();
length = std::numeric_limits<uint64_t>::max();
}
shard_index_array.data()[i * 2] = entry_offset;
shard_index_array.data()[i * 2 + 1] = length;
}
if (!has_entry) return std::nullopt;
switch (shard_index_parameters.index_location) {
case ShardIndexLocation::kStart: {
ABSL_CHECK(writer.Close());
absl::Cord encoded_shard_index;
riegeli::CordWriter index_writer{&encoded_shard_index};
TENSORSTORE_RETURN_IF_ERROR(EncodeShardIndex(
index_writer, ShardIndex{std::move(shard_index_array)},
shard_index_parameters));
ABSL_CHECK(index_writer.Close());
encoded_shard_index.Append(std::move(shard_data));
shard_data = std::move(encoded_shard_index);
break;
}
case ShardIndexLocation::kEnd: {
TENSORSTORE_RETURN_IF_ERROR(
EncodeShardIndex(writer, ShardIndex{std::move(shard_index_array)},
shard_index_parameters));
ABSL_CHECK(writer.Close());
break;
}
}
return shard_data;
}
}
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include <optional>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/index.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
using ::tensorstore::zarr3_sharding_indexed::DecodeShard;
using ::tensorstore::zarr3_sharding_indexed::EncodeShard;
using ::tensorstore::zarr3_sharding_indexed::ShardEntries;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexParameters;
Result<ShardIndexParameters> GetParams(
ShardIndexLocation index_location, std::vector<Index> grid_shape,
::nlohmann::json::array_t index_codecs_json = {GetDefaultBytesCodecJson(),
{{"name", "crc32c"}}}) {
TENSORSTORE_ASSIGN_OR_RETURN(auto index_codecs,
ZarrCodecChainSpec::FromJson(index_codecs_json));
ShardIndexParameters p;
p.index_location = index_location;
TENSORSTORE_RETURN_IF_ERROR(p.Initialize(index_codecs, grid_shape));
return p;
}
TEST(InitializeTest, Success) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p,
GetParams(ShardIndexLocation::kEnd, {2, 3}));
EXPECT_EQ(6, p.num_entries);
EXPECT_THAT(p.index_shape, ::testing::ElementsAre(2, 3, 2));
}
TEST(InitializeTest, InvalidIndexCodecs) {
EXPECT_THAT(
GetParams(ShardIndexLocation::kEnd, {2, 3},
{GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: only fixed-size encodings are supported"));
}
TEST(InitializeTest, InvalidGridShape) {
EXPECT_THAT(
GetParams(ShardIndexLocation::kEnd, {1024 * 1024 * 1024 + 1}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"grid shape of .* has more than 1073741824 entries"));
}
TEST(EncodeShardTest, RoundTrip) {
for (auto index_location :
{ShardIndexLocation::kStart, ShardIndexLocation::kEnd}) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p, GetParams(index_location, {2, 3}));
ShardEntries entries;
entries.entries = {
absl::Cord("(0, 0)"), absl::Cord("(0, 1)"), std::nullopt,
std::nullopt, absl::Cord("(1, 1)"), std::nullopt
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeShard(entries, p));
ASSERT_TRUE(encoded.has_value());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded_entries,
DecodeShard(*encoded, p));
EXPECT_THAT(decoded_entries.entries,
::testing::ElementsAreArray(entries.entries));
}
}
TEST(EncodeShardTest, RoundTripEmpty) {
for (auto index_location :
{ShardIndexLocation::kStart, ShardIndexLocation::kEnd}) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p, GetParams(index_location, {2, 3}));
ShardEntries entries;
entries.entries.resize(6);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeShard(entries, p));
ASSERT_FALSE(encoded.has_value());
}
}
TEST(DecodeShardTest, TooShort) {
absl::Cord encoded(std::string{1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p,
GetParams(ShardIndexLocation::kEnd, {2}));
EXPECT_THAT(DecodeShard(encoded, p),
MatchesStatus(absl::StatusCode::kDataLoss,
"Existing shard has size of 3 bytes, but expected "
"at least .* bytes"));
}
// The little-endian index entry below claims byte range [0, 17), but the
// entire shard is only 16 bytes, so decoding must fail with kDataLoss.
TEST(DecodeShardTest, ByteRangeOutOfRange) {
  absl::Cord encoded(std::string{
      0, 0, 0, 0, 0, 0, 0, 0,
      17, 0, 0, 0, 0, 0, 0, 0,
  });
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto p, GetParams(ShardIndexLocation::kEnd, {1},
                        {{{"name", "bytes"},
                          {"configuration", {{"endian", "little"}}}}}));
  EXPECT_THAT(
      DecodeShard(encoded, p),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Shard index entry 0 with byte range .* is invalid .*"));
}
// An index entry whose offset (0xfffffffffffffffe) plus length (1) overflows
// is rejected.  An `unsigned char` array is used because values above 127
// cannot portably appear in a `std::string` initializer list.
TEST(DecodeShardTest, ByteRangeInvalid) {
  unsigned char data[] = {
      0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      1, 0, 0, 0, 0, 0, 0, 0,
  };
  absl::Cord encoded(
      std::string_view(reinterpret_cast<const char*>(data), sizeof(data)));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto p, GetParams(ShardIndexLocation::kEnd, {1},
                        {{{"name", "bytes"},
                          {"configuration", {{"endian", "little"}}}}}));
  EXPECT_THAT(DecodeShard(encoded, p),
              MatchesStatus(absl::StatusCode::kDataLoss,
                            "Invalid shard index entry 0 with .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/shard_format.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/shard_format_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
61928d7e-efb0-4f2c-8acc-2f59f7b04420 | cpp | google/tensorstore | key | tensorstore/kvstore/zarr3_sharding_indexed/key.cc | tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc | #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
// Encodes grid cell indices as a key: each index becomes 4 big-endian bytes,
// concatenated in dimension order.
std::string IndicesToKey(span<const Index> grid_cell_indices) {
  std::string result(grid_cell_indices.size() * 4, '\0');
  char* out = result.data();
  for (const Index index : grid_cell_indices) {
    absl::big_endian::Store32(out, index);
    out += 4;
  }
  return result;
}
bool KeyToIndices(std::string_view key, span<Index> grid_cell_indices) {
if (key.size() != grid_cell_indices.size() * 4) {
return false;
}
for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) {
grid_cell_indices[i] = absl::big_endian::Load32(key.data() + i * 4);
}
return true;
}
std::optional<EntryId> KeyToEntryId(std::string_view key,
span<const Index> grid_shape) {
const DimensionIndex rank = grid_shape.size();
if (rank * sizeof(uint32_t) != key.size()) return {};
EntryId id = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
auto index = absl::big_endian::Load32(key.data() + i * 4);
if (index >= grid_shape[i]) return {};
id *= grid_shape[i];
id += index;
}
return id;
}
// Like `KeyToEntryId`, but returns an `InvalidArgument` error (quoting the
// offending key and grid shape) instead of `std::nullopt`.
Result<EntryId> KeyToEntryIdOrError(std::string_view key,
                                    span<const Index> grid_shape) {
  auto entry_id = KeyToEntryId(key, grid_shape);
  if (!entry_id) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Invalid key (grid_shape=", grid_shape,
                            "): ", tensorstore::QuoteString(key)));
  }
  return *entry_id;
}
// Inverse of `KeyToEntryId`: expands a flat C-order entry id into per-
// dimension indices, encoded as 4 big-endian bytes each.
std::string EntryIdToKey(EntryId entry_id, span<const Index> grid_shape) {
  std::string key(grid_shape.size() * 4, '\0');
  // Peel off indices from the last (fastest-varying) dimension first.
  for (DimensionIndex dim = grid_shape.size() - 1; dim >= 0; --dim) {
    const Index extent = grid_shape[dim];
    absl::big_endian::Store32(key.data() + dim * 4, entry_id % extent);
    entry_id /= extent;
  }
  return key;
}
// Maps an arbitrary key (possibly shorter or longer than the canonical
// rank*4 bytes) to the smallest entry id whose canonical key is >= `key`.
// Used to translate key-range bounds into entry-id ranges.
EntryId LowerBoundToEntryId(std::string_view key,
                            span<const Index> grid_shape) {
  char key_padded[kMaxRank * 4];
  const size_t full_key_size = grid_shape.size() * 4;
  // Short keys are zero-padded to the full canonical length.
  const size_t key_bytes_to_copy = std::min(full_key_size, key.size());
  std::memcpy(key_padded, key.data(), key_bytes_to_copy);
  std::memset(key_padded + key_bytes_to_copy, 0,
              full_key_size - key_bytes_to_copy);
  EntryId entry_id = 0;
  // Once some dimension's index is out of range, all subsequent indices are
  // ignored (mask becomes 0): the bound rounds up at that dimension.
  EntryId remaining_indices_mask = ~static_cast<EntryId>(0);
  EntryId max_entry_id = 1;
  for (DimensionIndex i = 0; i < grid_shape.size(); ++i) {
    const EntryId size = grid_shape[i];
    max_entry_id *= size;
    EntryId index = absl::big_endian::Load32(&key_padded[i * 4]);
    entry_id *= size;
    if (index >= size) {
      entry_id += (size & remaining_indices_mask);
      remaining_indices_mask = 0;
    } else {
      entry_id += (index & remaining_indices_mask);
    }
  }
  assert(entry_id <= max_entry_id);
  // A key strictly longer than the canonical encoding sorts after the entry
  // it prefixes, so the lower bound is the next entry id (clamped to the
  // total number of entries).
  if (key.size() > full_key_size) {
    if (entry_id < max_entry_id) {
      ++entry_id;
    }
  }
  return entry_id;
}
std::pair<EntryId, EntryId> KeyRangeToEntryRange(std::string_view inclusive_min,
std::string_view exclusive_max,
span<const Index> grid_shape) {
EntryId lower_bound = LowerBoundToEntryId(inclusive_min, grid_shape);
EntryId upper_bound;
if (exclusive_max.empty()) {
upper_bound = static_cast<EntryId>(ProductOfExtents(grid_shape));
} else {
upper_bound = LowerBoundToEntryId(exclusive_max, grid_shape);
}
return {lower_bound, upper_bound};
}
// Maps an arbitrary internal key (canonically 4 big-endian bytes) to the
// smallest entry id whose canonical internal key is >= `key`, clamped to
// `num_entries_per_shard`.
EntryId InternalKeyLowerBoundToEntryId(std::string_view key,
                                       int64_t num_entries_per_shard) {
  char key_bytes[4] = {};
  // Fix: `std::memcpy` from a null source is UB even with a zero count, and
  // `key.data()` may be null for a default-constructed/empty string_view.
  if (!key.empty()) {
    std::memcpy(key_bytes, key.data(),
                std::min(static_cast<size_t>(4), key.size()));
  }
  EntryId entry_id = absl::big_endian::Load32(key_bytes);
  // Clamp to the number of entries in the shard.
  if (entry_id > num_entries_per_shard) {
    entry_id = num_entries_per_shard;
  }
  // A key longer than 4 bytes sorts after the entry it prefixes, so the
  // lower bound is the next entry id.
  if (key.size() > 4 && entry_id < num_entries_per_shard) {
    ++entry_id;
  }
  return entry_id;
}
std::pair<EntryId, EntryId> InternalKeyRangeToEntryRange(
std::string_view inclusive_min, std::string_view exclusive_max,
int64_t num_entries_per_shard) {
return {InternalKeyLowerBoundToEntryId(inclusive_min, num_entries_per_shard),
exclusive_max.empty() ? EntryId(num_entries_per_shard)
: InternalKeyLowerBoundToEntryId(
exclusive_max, num_entries_per_shard)};
}
// Encodes an entry id as its canonical 4-byte big-endian internal key.
std::string EntryIdToInternalKey(EntryId entry_id) {
  std::string key(4, '\0');
  absl::big_endian::Store32(key.data(), entry_id);
  return key;
}
// Decodes a canonical 4-byte big-endian internal key back to its entry id.
// Precondition (asserted): `key` is exactly 4 bytes.
EntryId InternalKeyToEntryId(std::string_view key) {
  assert(key.size() == 4);
  return static_cast<EntryId>(absl::big_endian::Load32(key.data()));
}
// Converts an external key range into the equivalent range of canonical
// 4-byte internal keys, via the entry-id range.
KeyRange KeyRangeToInternalKeyRange(const KeyRange& range,
                                    span<const Index> grid_shape) {
  const auto entry_range = KeyRangeToEntryRange(
      range.inclusive_min, range.exclusive_max, grid_shape);
  return KeyRange{EntryIdToInternalKey(entry_range.first),
                  EntryIdToInternalKey(entry_range.second)};
}
// Renders an entry id as a human-readable "shard entry {indices}/{shape}"
// string for use in error messages.
std::string DescribeEntryId(EntryId entry_id, span<const Index> grid_shape) {
  Index indices[kMaxRank];
  span<Index> indices_span(&indices[0], grid_shape.size());
  // Expand the flat C-order id into per-dimension indices.
  GetContiguousIndices<c_order, Index>(entry_id, grid_shape, indices_span);
  return tensorstore::StrCat("shard entry ", indices_span, "/", grid_shape);
}
// Renders an external key for error messages; keys that do not decode to a
// valid entry id are quoted and labeled invalid.
std::string DescribeKey(std::string_view key, span<const Index> grid_shape) {
  auto entry_id = KeyToEntryId(key, grid_shape);
  if (!entry_id) {
    return tensorstore::StrCat("invalid shard entry ",
                               tensorstore::QuoteString(key), "/", grid_shape);
  }
  return DescribeEntryId(*entry_id, grid_shape);
}
// Renders a canonical 4-byte internal key for error messages (asserts the
// key is well-formed via `InternalKeyToEntryId`).
std::string DescribeInternalKey(std::string_view key,
                                span<const Index> grid_shape) {
  return DescribeEntryId(InternalKeyToEntryId(key), grid_shape);
}
}
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::KeyRange;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToInternalKey;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::IndicesToKey;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyLowerBoundToEntryId;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToInternalKeyRange;
using ::tensorstore::zarr3_sharding_indexed::KeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyToIndices;
using ::tensorstore::zarr3_sharding_indexed::LowerBoundToEntryId;
// Round-trips a key through KeyToEntryId/EntryIdToKey; the expected id is
// the C-order flattening of indices (1, 2, 3) in a 4x5x6 grid.
TEST(KeyToEntryIdTest, Basic) {
  EntryId entry_id = 1 * 5 * 6 + 2 * 6 + 3;
  std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
  Index grid_shape[] = {4, 5, 6};
  EXPECT_THAT(KeyToEntryId(key, grid_shape), ::testing::Optional(entry_id));
  EXPECT_THAT(EntryIdToKey(entry_id, grid_shape), ::testing::Eq(key));
}
// First index (4) equals the extent of dimension 0, so the key is rejected.
TEST(KeyToEntryIdTest, OutOfRange) {
  EXPECT_THAT(KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 3},
                           {{4, 5, 6}}),
              ::testing::Eq(std::nullopt));
}
// An 11-byte key (one byte short of rank*4 = 12) is rejected.
TEST(KeyToEntryIdTest, Invalid) {
  EXPECT_THAT(
      KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, {{4, 5, 6}}),
      ::testing::Eq(std::nullopt));
}
// Round-trips indices through IndicesToKey/KeyToIndices, and verifies that a
// wrong-length key is rejected by KeyToIndices.
TEST(IndicesToKeyTest, Basic) {
  const Index indices[] = {1, 2, 3};
  std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
  EXPECT_THAT(IndicesToKey(indices), ::testing::Eq(key));
  Index decoded_indices[3];
  EXPECT_TRUE(KeyToIndices(key, decoded_indices));
  EXPECT_THAT(decoded_indices, ::testing::ElementsAreArray(indices));
  EXPECT_FALSE(KeyToIndices(key.substr(1), decoded_indices));
}
// A canonical-length key maps to exactly its own entry id.
TEST(LowerBoundToEntryId, Exact) {
  Index grid_shape[] = {4, 5, 6};
  EXPECT_THAT(LowerBoundToEntryId(
                  std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}, grid_shape),
              ::testing::Eq(1 * 5 * 6 + 2 * 6 + 3));
}
// A key longer than the canonical 12 bytes sorts after entry (1,2,3), so the
// lower bound rounds up to the next entry id.
TEST(LowerBoundToEntryId, Longer) {
  Index grid_shape[] = {4, 5, 6};
  EXPECT_THAT(
      LowerBoundToEntryId(std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0},
                          grid_shape),
      ::testing::Eq(1 * 5 * 6 + 2 * 6 + 4));
}
// Empty bounds denote the full range: [0, total number of grid entries).
TEST(KeyRangeToEntryRange, Full) {
  Index grid_shape[] = {4, 5, 6};
  EXPECT_THAT(KeyRangeToEntryRange("", "", grid_shape),
              ::testing::Pair(0, 4 * 5 * 6));
}
// Canonical-length bounds map to their exact entry ids, both for the
// entry-range and the internal-key-range conversions.
TEST(KeyRangeToEntryRange, Partial) {
  Index grid_shape[] = {4, 5, 6};
  EXPECT_THAT(
      KeyRangeToEntryRange(
          std::string{
              0, 0, 0, 2,
              0, 0, 0, 3,
              0, 0, 0, 4,
          },
          std::string{
              0, 0, 0, 2,
              0, 0, 0, 4,
              0, 0, 0, 5,
          },
          grid_shape),
      ::testing::Pair(2 * (5 * 6) + 3 * 6 + 4, 2 * (5 * 6) + 4 * 6 + 5));
  EXPECT_THAT(KeyRangeToInternalKeyRange(KeyRange{std::string{
                                                      0, 0, 0, 2,
                                                      0, 0, 0, 3,
                                                      0, 0, 0, 4,
                                                  },
                                                  std::string{
                                                      0, 0, 0, 2,
                                                      0, 0, 0, 4,
                                                      0, 0, 0, 5,
                                                  }},
                                         grid_shape),
              KeyRange(EntryIdToInternalKey(2 * (5 * 6) + 3 * 6 + 4),
                       EntryIdToInternalKey(2 * (5 * 6) + 4 * 6 + 5)));
}
// Round-trips an entry id through the 4-byte big-endian internal key form.
TEST(EntryIdToInternalKeyTest, Basic) {
  EntryId entry_id = 0x01020304;
  std::string internal_key{0x01, 0x02, 0x03, 0x04};
  EXPECT_THAT(EntryIdToInternalKey(entry_id), ::testing::Eq(internal_key));
  EXPECT_THAT(InternalKeyToEntryId(internal_key), ::testing::Eq(entry_id));
}
// Covers the four internal-key lower-bound cases: exact 4-byte key; longer
// key (rounds up); short key (zero-padded); and clamping to the shard size.
TEST(InternalKeyLowerBoundToEntryIdTest, Basic) {
  EXPECT_THAT(InternalKeyLowerBoundToEntryId(
                  std::string{0x01, 0x02, 0x03, 0x04}, 0x88888888),
              ::testing::Eq(0x01020304));
  EXPECT_THAT(InternalKeyLowerBoundToEntryId(
                  std::string{0x01, 0x02, 0x03, 0x04, 0x0}, 0x88888888),
              ::testing::Eq(0x01020304 + 1));
  EXPECT_THAT(
      InternalKeyLowerBoundToEntryId(std::string{0x01, 0x02, 0x03}, 0x88888888),
      ::testing::Eq(0x01020300));
  EXPECT_THAT(InternalKeyLowerBoundToEntryId(
                  std::string{0x01, 0x02, 0x03, 0x04}, 0x01020302),
              ::testing::Eq(0x01020302));
}
// Bounded and unbounded (empty exclusive_max) internal-key ranges; the
// unbounded case ends at num_entries_per_shard.
TEST(InternalKeyRangeToEntryRange, Basic) {
  EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
                                           std::string{0x01, 0x02, 0x03, 0x07},
                                           0x88888888),
              ::testing::Pair(0x01020304, 0x01020307));
  EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
                                           {}, 0x88888888),
              ::testing::Pair(0x01020304, 0x88888888));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e0018a36-efb1-402e-a04d-3986a28f728c | cpp | google/tensorstore | zarr3_sharding_indexed | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed_test.cc | #include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/bit_vec.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/flow_sender_operation_state.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/internal/estimate_heap_usage/std_optional.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
namespace {
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
// Adapter kvstore that reads only the fixed-size shard index portion of a
// shard stored in `base_`.  Depending on `index_location_`, the index
// occupies either the first or the last `index_size_in_bytes_` bytes.
class ShardIndexKeyValueStore : public kvstore::Driver {
 public:
  explicit ShardIndexKeyValueStore(kvstore::DriverPtr base,
                                   ShardIndexLocation index_location,
                                   int64_t index_size_in_bytes)
      : base_(std::move(base)),
        index_location_(index_location),
        index_size_in_bytes_(index_size_in_bytes) {}
  Future<kvstore::ReadResult> Read(kvstore::Key key,
                                   kvstore::ReadOptions options) override {
    // Callers must not specify their own byte range; this driver supplies it.
    assert(options.byte_range == OptionalByteRangeRequest{});
    switch (index_location_) {
      case ShardIndexLocation::kStart:
        options.byte_range =
            OptionalByteRangeRequest::Range(0, index_size_in_bytes_);
        break;
      case ShardIndexLocation::kEnd:
        options.byte_range =
            OptionalByteRangeRequest::SuffixLength(index_size_in_bytes_);
        break;
    }
    // A base-store "invalid byte range" (shard shorter than the index) is
    // surfaced as FailedPrecondition rather than InvalidArgument.
    return MapFutureError(
        InlineExecutor{},
        [](const absl::Status& status) {
          return internal::ConvertInvalidArgumentToFailedPrecondition(status);
        },
        base_->Read(std::move(key), std::move(options)));
  }
  std::string DescribeKey(std::string_view key) override {
    return tensorstore::StrCat("shard index in ", base_->DescribeKey(key));
  }
  // No-op: this adapter owns no garbage-collected references of its own.
  void GarbageCollectionVisit(
      garbage_collection::GarbageCollectionVisitor& visitor) const final {
  }
  kvstore::Driver* base() { return base_.get(); }

 private:
  kvstore::DriverPtr base_;
  ShardIndexLocation index_location_;
  int64_t index_size_in_bytes_;
};
// Read-only cache of decoded shard indices, backed by a
// ShardIndexKeyValueStore.  One cache entry corresponds to the single shard
// at `base_kvstore_path_`.
class ShardIndexCache
    : public internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache> {
  using Base = internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache>;

 public:
  using ReadData = ShardIndex;
  class Entry : public Base::Entry {
   public:
    using OwningCache = ShardIndexCache;
    size_t ComputeReadDataSizeInBytes(const void* read_data) override {
      const auto& cache = GetOwningCache(*this);
      // Each index entry stores two uint64 values (offset, length).
      return read_data
                 ? cache.shard_index_params().num_entries * sizeof(uint64_t) * 2
                 : 0;
    }
    std::string GetKeyValueStoreKey() override {
      return GetOwningCache(*this).base_kvstore_path_;
    }
    // Decodes the raw index bytes on the executor; a missing value (no shard)
    // decodes to a null ReadData.
    void DoDecode(std::optional<absl::Cord> value,
                  DecodeReceiver receiver) override {
      GetOwningCache(*this).executor()(
          [this, value = std::move(value),
           receiver = std::move(receiver)]() mutable {
            std::shared_ptr<ReadData> read_data;
            if (value) {
              TENSORSTORE_ASSIGN_OR_RETURN(
                  auto shard_index,
                  DecodeShardIndex(*value,
                                   GetOwningCache(*this).shard_index_params()),
                  static_cast<void>(execution::set_error(receiver, _)));
              read_data = std::make_shared<ReadData>(std::move(shard_index));
            }
            execution::set_value(receiver, std::move(read_data));
          });
    }
  };
  Entry* DoAllocateEntry() final { return new Entry; }
  size_t DoGetSizeofEntry() final { return sizeof(Entry); }
  // This cache is read-only; transactional writes go through
  // ShardedKeyValueStoreWriteCache instead.
  TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
    ABSL_UNREACHABLE();
  }
  // Note: the base-class initializer reads `params` before the member
  // initializer moves it; base classes are initialized first, so this is
  // well-defined.
  explicit ShardIndexCache(kvstore::DriverPtr base_kvstore,
                           std::string base_kvstore_path, Executor executor,
                           ShardIndexParameters&& params)
      : Base(kvstore::DriverPtr(new ShardIndexKeyValueStore(
            std::move(base_kvstore), params.index_location,
            params.index_codec_state->encoded_size()))),
        base_kvstore_path_(std::move(base_kvstore_path)),
        executor_(std::move(executor)),
        shard_index_params_(std::move(params)) {}
  ShardIndexKeyValueStore* shard_index_kvstore_driver() {
    return static_cast<ShardIndexKeyValueStore*>(this->Base::kvstore_driver());
  }
  kvstore::Driver* base_kvstore_driver() {
    return shard_index_kvstore_driver()->base();
  }
  const std::string& base_kvstore_path() const { return base_kvstore_path_; }
  const Executor& executor() { return executor_; }
  // Grid shape excludes the final dimension of `index_shape` (which holds
  // the offset/length pair per entry).
  span<const Index> grid_shape() const {
    return span<const Index>(shard_index_params_.index_shape.data(),
                             shard_index_params_.index_shape.size() - 1);
  }
  const ShardIndexParameters& shard_index_params() const {
    return shard_index_params_;
  }
  std::string base_kvstore_path_;
  Executor executor_;
  ShardIndexParameters shard_index_params_;
};
// Cache used for transactional writes: each entry holds the full decoded
// shard (`ShardEntries`), and the transaction node merges per-entry
// read-modify-write mutations into a single atomic writeback of the shard.
class ShardedKeyValueStoreWriteCache
    : public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
                                      internal::AsyncCache> {
  using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
                                        internal::AsyncCache>;

 public:
  using ReadData = ShardEntries;
  explicit ShardedKeyValueStoreWriteCache(
      internal::CachePtr<ShardIndexCache> shard_index_cache)
      : Base(kvstore::DriverPtr(shard_index_cache->base_kvstore_driver())),
        shard_index_cache_(std::move(shard_index_cache)) {}
  class Entry : public Base::Entry {
   public:
    using OwningCache = ShardedKeyValueStoreWriteCache;
    size_t ComputeReadDataSizeInBytes(const void* data) override {
      return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
    }
    // Decodes the full shard on the executor; a missing shard decodes to a
    // ShardEntries with every entry absent.
    void DoDecode(std::optional<absl::Cord> value,
                  DecodeReceiver receiver) override {
      GetOwningCache(*this).executor()(
          [this, value = std::move(value),
           receiver = std::move(receiver)]() mutable {
            ShardEntries entries;
            const auto& shard_index_params =
                GetOwningCache(*this).shard_index_params();
            if (value) {
              TENSORSTORE_ASSIGN_OR_RETURN(
                  entries, DecodeShard(*value, shard_index_params),
                  static_cast<void>(execution::set_error(receiver, _)));
            } else {
              entries.entries.resize(shard_index_params.num_entries);
            }
            execution::set_value(
                receiver, std::make_shared<ShardEntries>(std::move(entries)));
          });
    }
    // Re-encodes the merged shard for writeback.
    void DoEncode(std::shared_ptr<const ShardEntries> data,
                  EncodeReceiver receiver) override {
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto encoded_shard,
          EncodeShard(*data, GetOwningCache(*this).shard_index_params()),
          static_cast<void>(execution::set_error(receiver, _)));
      execution::set_value(receiver, std::move(encoded_shard));
    }
    std::string GetKeyValueStoreKey() override {
      return GetOwningCache(*this).base_kvstore_path();
    }
  };
  // Collects per-entry mutations (keyed by internal 4-byte entry keys) and
  // applies them atomically to the shard.
  class TransactionNode : public Base::TransactionNode,
                          public internal_kvstore::AtomicMultiPhaseMutation {
   public:
    using OwningCache = ShardedKeyValueStoreWriteCache;
    using Base::TransactionNode::TransactionNode;
    absl::Mutex& mutex() override { return this->mutex_; }
    void PhaseCommitDone(size_t next_phase) override {}
    internal::TransactionState::Node& GetTransactionNode() override {
      return *this;
    }
    void Abort() override {
      this->AbortRemainingPhases();
      Base::TransactionNode::Abort();
    }
    std::string DescribeKey(std::string_view key) override {
      auto& cache = GetOwningCache(*this);
      return tensorstore::StrCat(
          DescribeInternalKey(key, cache.shard_index_params().grid_shape()),
          " in ",
          cache.kvstore_driver()->DescribeKey(cache.base_kvstore_path()));
    }
    void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
    void StartApply();
    void AllEntriesDone(
        internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
    void MergeForWriteback(bool conditional);
    // Records the first per-entry writeback error; subsequent errors are
    // dropped.
    void RecordEntryWritebackError(
        internal_kvstore::ReadModifyWriteEntry& entry,
        absl::Status error) override {
      absl::MutexLock lock(&mutex_);
      if (apply_status_.ok()) {
        apply_status_ = std::move(error);
      }
    }
    void Revoke() override {
      Base::TransactionNode::Revoke();
      // Acquire and immediately release the writer lock — presumably to
      // synchronize with any in-flight writer before revoking; TODO confirm.
      { UniqueWriterLock(*this); }
      this->RevokeAllEntries();
    }
    void WritebackSuccess(ReadState&& read_state) override;
    void WritebackError() override;
    void InvalidateReadState() override;
    bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
    // Serves a per-entry transactional read from the cached decoded shard.
    void Read(
        internal_kvstore::ReadModifyWriteEntry& entry,
        kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
        kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
      this->AsyncCache::TransactionNode::Read({options.staleness_bound})
          .ExecuteWhenReady(WithExecutor(
              GetOwningCache(*this).executor(),
              [&entry,
               if_not_equal =
                   std::move(options.generation_conditions.if_not_equal),
               receiver = std::move(receiver)](
                  ReadyFuture<const void> future) mutable {
                if (!future.result().ok()) {
                  execution::set_error(receiver, future.result().status());
                  return;
                }
                execution::submit(HandleShardReadSuccess(entry, if_not_equal),
                                  receiver);
              }));
    }
    // Translates the cached shard state into a per-entry ReadResult for
    // `entry`, honoring the `if_not_equal` generation condition.
    static Result<kvstore::ReadResult> HandleShardReadSuccess(
        internal_kvstore::ReadModifyWriteEntry& entry,
        const StorageGeneration& if_not_equal) {
      auto& self = static_cast<TransactionNode&>(entry.multi_phase());
      TimestampedStorageGeneration stamp;
      std::shared_ptr<const ShardEntries> entries;
      {
        AsyncCache::ReadLock<ShardEntries> lock{self};
        stamp = lock.stamp();
        entries = lock.shared_data();
      }
      if (!StorageGeneration::IsUnknown(stamp.generation) &&
          stamp.generation == if_not_equal) {
        return kvstore::ReadResult::Unspecified(std::move(stamp));
      }
      if (StorageGeneration::IsDirty(stamp.generation)) {
        stamp.generation =
            StorageGeneration::AddLayer(std::move(stamp.generation));
      }
      auto entry_id = InternalKeyToEntryId(entry.key_);
      const auto& shard_entry = entries->entries[entry_id];
      if (!shard_entry) {
        return kvstore::ReadResult::Missing(std::move(stamp));
      } else {
        return kvstore::ReadResult::Value(*shard_entry, std::move(stamp));
      }
    }
    ApplyReceiver apply_receiver_;
    ApplyOptions apply_options_;
    absl::Status apply_status_;
  };
  Entry* DoAllocateEntry() final { return new Entry; }
  size_t DoGetSizeofEntry() final { return sizeof(Entry); }
  TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
    return new TransactionNode(static_cast<Entry&>(entry));
  }
  const internal::CachePtr<ShardIndexCache>& shard_index_cache() const {
    return shard_index_cache_;
  }
  const Executor& executor() { return shard_index_cache()->executor(); }
  const ShardIndexParameters& shard_index_params() const {
    return shard_index_cache_->shard_index_params();
  }
  int64_t num_entries_per_shard() const {
    return shard_index_cache_->shard_index_params().num_entries;
  }
  const std::string& base_kvstore_path() const {
    return shard_index_cache_->base_kvstore_path();
  }
  internal::CachePtr<ShardIndexCache> shard_index_cache_;
};
// Invalidates both the cached shard read state and the per-entry read states
// of all buffered mutations.
void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() {
  Base::TransactionNode::InvalidateReadState();
  internal_kvstore::InvalidateReadState(phases_);
}
// Begins applying the transaction: stashes the receiver/options, resets the
// accumulated status, and kicks off the writeback on the executor.
void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply(
    ApplyOptions options, ApplyReceiver receiver) {
  apply_receiver_ = std::move(receiver);
  apply_options_ = options;
  apply_status_ = absl::OkStatus();
  GetOwningCache(*this).executor()([this] { this->StartApply(); });
}
// (Re)starts the atomic writeback of all buffered entries; completion is
// reported via `AllEntriesDone`.
void ShardedKeyValueStoreWriteCache::TransactionNode::StartApply() {
  RetryAtomicWriteback(apply_options_.staleness_bound);
}
// Called once every buffered entry has been read back.  Decides between:
// reporting an error, short-circuiting an unmodified transaction, retrying on
// generation mismatch, or merging the mutations into a new shard
// (conditionally on the existing shard contents when needed).
void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone(
    internal_kvstore::SinglePhaseMutation& single_phase_mutation) {
  if (!apply_status_.ok()) {
    execution::set_error(std::exchange(apply_receiver_, {}),
                         std::exchange(apply_status_, {}));
    return;
  }
  auto& self = *this;
  GetOwningCache(*this).executor()([&self] {
    TimestampedStorageGeneration stamp;
    bool mismatch = false;
    bool modified = false;
    int64_t num_entries = 0;
    auto& cache = GetOwningCache(self);
    const int64_t num_entries_per_shard = cache.num_entries_per_shard();
    // Scan all mutations, counting affected entries and checking that every
    // conditional mutation is conditioned on the same shard generation.
    for (auto& entry : self.phases_.entries_) {
      if (entry.entry_type() != kReadModifyWrite) {
        auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
        auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
            dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
        modified = true;
        num_entries += end_id - begin_id;
        continue;
      }
      auto& buffered_entry =
          static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>(
              entry);
      if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) {
        modified = true;
        ++num_entries;
      }
      auto& entry_stamp = buffered_entry.stamp();
      if (StorageGeneration::IsConditional(entry_stamp.generation)) {
        if (!StorageGeneration::IsUnknown(stamp.generation) &&
            StorageGeneration::Clean(stamp.generation) !=
                StorageGeneration::Clean(entry_stamp.generation)) {
          mismatch = true;
          break;
        } else {
          stamp = entry_stamp;
        }
      }
    }
    if (mismatch) {
      // Entries were conditioned on different generations; force a fresh
      // read and retry.
      self.apply_options_.staleness_bound = absl::Now();
      self.StartApply();
      return;
    }
    if (!modified && StorageGeneration::IsUnknown(stamp.generation) &&
        self.apply_options_.apply_mode !=
            ApplyOptions::ApplyMode::kSpecifyUnchanged) {
      // Nothing to write: report an unconditional no-op.
      internal::AsyncCache::ReadState update;
      update.stamp = TimestampedStorageGeneration::Unconditional();
      execution::set_value(std::exchange(self.apply_receiver_, {}),
                           std::move(update));
      return;
    }
    if (!StorageGeneration::IsUnknown(stamp.generation) ||
        num_entries != num_entries_per_shard) {
      // Existing shard contents are needed (either to validate the condition
      // or because not every entry is being overwritten): read, then merge
      // conditionally.
      self.internal::AsyncCache::TransactionNode::Read(
              {self.apply_options_.staleness_bound})
          .ExecuteWhenReady([&self](ReadyFuture<const void> future) {
            if (!future.result().ok()) {
              execution::set_error(std::exchange(self.apply_receiver_, {}),
                                   future.result().status());
              return;
            }
            GetOwningCache(self).executor()(
                [&self] { self.MergeForWriteback(true); });
          });
      return;
    }
    // Every entry is unconditionally overwritten; no read required.
    self.MergeForWriteback(false);
  });
}
// Merges the buffered mutations into a complete new ShardEntries value.  If
// `conditional`, merging starts from the cached existing shard contents (and
// each mutation's generation condition is validated against the cached
// generation); otherwise it starts from an empty shard.
void ShardedKeyValueStoreWriteCache::TransactionNode::MergeForWriteback(
    bool conditional) {
  TimestampedStorageGeneration stamp;
  ShardEntries new_entries;
  if (conditional) {
    auto lock = internal::AsyncCache::ReadLock<ShardEntries>{*this};
    stamp = lock.stamp();
    new_entries = *lock.shared_data();
  } else {
    stamp = TimestampedStorageGeneration::Unconditional();
  }
  auto& cache = GetOwningCache(*this);
  const int64_t num_entries_per_shard = cache.num_entries_per_shard();
  const bool has_existing_entries = !new_entries.entries.empty();
  new_entries.entries.resize(num_entries_per_shard);
  bool mismatch = false;
  bool changed = false;
  for (auto& entry : phases_.entries_) {
    if (entry.entry_type() != kReadModifyWrite) {
      // DeleteRange: clear every entry in the id range.
      auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
      auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
          dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
      if (has_existing_entries) {
        for (EntryId id = begin_id; id < end_id; ++id) {
          new_entries.entries[id] = std::nullopt;
        }
      }
      changed = true;
      continue;
    }
    auto& buffered_entry =
        static_cast<internal_kvstore::AtomicMultiPhaseMutation::
                        BufferedReadModifyWriteEntry&>(entry);
    auto& entry_stamp = buffered_entry.stamp();
    if (StorageGeneration::IsConditional(entry_stamp.generation) &&
        StorageGeneration::Clean(entry_stamp.generation) !=
            StorageGeneration::Clean(stamp.generation)) {
      // The shard changed since this mutation's condition was captured.
      mismatch = true;
      break;
    }
    if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified ||
        !StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) {
      continue;
    }
    auto entry_id = InternalKeyToEntryId(buffered_entry.key_);
    auto& new_entry = new_entries.entries[entry_id];
    if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) {
      new_entry = buffered_entry.value_;
      changed = true;
    } else if (new_entry) {
      new_entry = std::nullopt;
      changed = true;
    } else if (!conditional) {
      changed = true;
    }
  }
  if (mismatch) {
    // Retry with a fresh read.
    apply_options_.staleness_bound = absl::Now();
    this->StartApply();
    return;
  }
  internal::AsyncCache::ReadState update;
  update.stamp = std::move(stamp);
  if (changed) {
    update.stamp.generation.MarkDirty();
  }
  update.data = std::make_shared<ShardEntries>(std::move(new_entries));
  execution::set_value(std::exchange(apply_receiver_, {}), std::move(update));
}
// Propagates writeback success (with the committed generation stamp) to
// every buffered mutation, then releases the phase entries.
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess(
    ReadState&& read_state) {
  for (auto& entry : phases_.entries_) {
    if (entry.entry_type() != kReadModifyWrite) {
      internal_kvstore::WritebackSuccess(static_cast<DeleteRangeEntry&>(entry));
    } else {
      internal_kvstore::WritebackSuccess(
          static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry),
          read_state.stamp);
    }
  }
  internal_kvstore::DestroyPhaseEntries(phases_);
  Base::TransactionNode::WritebackSuccess(std::move(read_state));
}
// Propagates writeback failure to every buffered mutation, then releases the
// phase entries.
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() {
  internal_kvstore::WritebackError(phases_);
  internal_kvstore::DestroyPhaseEntries(phases_);
  Base::TransactionNode::WritebackError();
}
// JSON-serializable spec data for the "zarr3_sharding_indexed" driver:
// the base kvstore, grid shape, index codec chain, and index placement.
struct ShardedKeyValueStoreSpecData {
  Context::Resource<internal::CachePoolResource> cache_pool;
  Context::Resource<internal::DataCopyConcurrencyResource>
      data_copy_concurrency;
  kvstore::Spec base;
  std::vector<Index> grid_shape;
  internal_zarr3::ZarrCodecChainSpec index_codecs;
  ShardIndexLocation index_location;
  TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData,
                                          internal_json_binding::NoOptions,
                                          IncludeDefaults,
                                          ::nlohmann::json::object_t)
  // Enumerates members for serialization / cache-key / garbage collection.
  constexpr static auto ApplyMembers = [](auto&& x, auto f) {
    return f(x.cache_pool, x.data_copy_concurrency, x.base, x.grid_shape,
             x.index_codecs, x.index_location);
  };
};
namespace jb = ::tensorstore::internal_json_binding;
// JSON binder: "base", "grid_shape" (validated), "index_codecs",
// "index_location" (defaults to "end" and is always serialized), plus the
// cache-pool and data-copy-concurrency context resources.
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
    ShardedKeyValueStoreSpecData,
    jb::Object(
        jb::Member("base",
                   jb::Projection<&ShardedKeyValueStoreSpecData::base>()),
        jb::Member(
            "grid_shape",
            jb::Projection<&ShardedKeyValueStoreSpecData::grid_shape>(
                jb::Validate([](const auto& options,
                                auto* obj) { return ValidateGridShape(*obj); },
                             jb::ChunkShapeVector(nullptr)))),
        jb::Member("index_codecs",
                   jb::Projection<&ShardedKeyValueStoreSpecData::index_codecs>(
                       internal_zarr3::ZarrCodecChainJsonBinder<
                           false>)),
        jb::Member(
            "index_location",
            jb::Projection<&ShardedKeyValueStoreSpecData::index_location>(
                jb::DefaultValue<jb::kAlwaysIncludeDefaults>([](auto* x) {
                  *x = ShardIndexLocation::kEnd;
                }))),
        jb::Member(internal::CachePoolResource::id,
                   jb::Projection<&ShardedKeyValueStoreSpecData::cache_pool>()),
        jb::Member(
            internal::DataCopyConcurrencyResource::id,
            jb::Projection<
                &ShardedKeyValueStoreSpecData::data_copy_concurrency>())));
// Registered driver spec for the "zarr3_sharding_indexed" kvstore driver.
class ShardedKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<
          ShardedKeyValueStoreSpec, ShardedKeyValueStoreSpecData> {
 public:
  // Driver identifier used in JSON specs.
  static constexpr char id[] = "zarr3_sharding_indexed";
  // Opens the base kvstore and constructs the sharded driver (defined below).
  Future<kvstore::DriverPtr> DoOpen() const override;
  // Returns the spec of the underlying base kvstore.
  Result<kvstore::Spec> GetBase(std::string_view path) const override {
    return data_.base;
  }
};
// kvstore driver that stores all keys as entries of a single shard object in
// a base kvstore.  Keys are converted to entry ids within a grid (see
// KeyToEntryIdOrError); reads consult a cached shard index, and writes are
// applied through a transactional write-back cache.
class ShardedKeyValueStore
    : public internal_kvstore::RegisteredDriver<ShardedKeyValueStore,
                                                ShardedKeyValueStoreSpec> {
 public:
  // If `shared_cache_key` is non-empty, the write cache is shared among
  // drivers constructed with the same key (see ShardedKeyValueStoreSpec::DoOpen).
  explicit ShardedKeyValueStore(ShardedKeyValueStoreParameters&& params,
                                std::string_view shared_cache_key = {});
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
                               size_t& phase, Key key,
                               ReadModifyWriteSource& source) override;
  absl::Status TransactionalDeleteRange(
      const internal::OpenTransactionPtr& transaction, KeyRange range) override;
  Future<const void> DeleteRange(KeyRange range) override;
  std::string DescribeKey(std::string_view key) override;
  kvstore::SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final;
  Result<KvStore> GetBase(std::string_view path,
                          const Transaction& transaction) const override;
  // Accessors delegating to the shard index cache.
  kvstore::Driver* base_kvstore_driver() const {
    return shard_index_cache()->base_kvstore_driver();
  }
  const ShardIndexParameters& shard_index_params() const {
    return shard_index_cache()->shard_index_params();
  }
  const Executor& executor() const { return shard_index_cache()->executor(); }
  const std::string& base_kvstore_path() const {
    return shard_index_cache()->base_kvstore_path();
  }
  const internal::CachePtr<ShardIndexCache>& shard_index_cache() const {
    return write_cache_->shard_index_cache_;
  }
  absl::Status GetBoundSpecData(ShardedKeyValueStoreSpecData& spec) const;
  // Write-back cache; also owns the shard index cache.
  internal::CachePtr<ShardedKeyValueStoreWriteCache> write_cache_;
  // Spec-related state retained only when the driver is opened from a spec;
  // required by GetBoundSpecData.
  struct DataForSpec {
    Context::Resource<internal::CachePoolResource> cache_pool_resource;
    Context::Resource<internal::DataCopyConcurrencyResource>
        data_copy_concurrency_resource;
    ZarrCodecChainSpec index_codecs;
  };
  std::unique_ptr<DataForSpec> data_for_spec_;
};
// Constructs the driver: creates (or retrieves from the cache pool, keyed by
// `shared_cache_key`) the write-back cache, which in turn owns the shard
// index cache wrapping the base kvstore.
ShardedKeyValueStore::ShardedKeyValueStore(
    ShardedKeyValueStoreParameters&& params,
    std::string_view shared_cache_key) {
  write_cache_ = internal::GetCache<ShardedKeyValueStoreWriteCache>(
      params.cache_pool.get(), shared_cache_key, [&] {
        return std::make_unique<ShardedKeyValueStoreWriteCache>(
            internal::GetCache<ShardIndexCache>(
                params.cache_pool.get(), "", [&] {
                  return std::make_unique<ShardIndexCache>(
                      std::move(params.base_kvstore),
                      std::move(params.base_kvstore_path),
                      std::move(params.executor),
                      std::move(params.index_params));
                }));
      });
  this->SetBatchNestingDepth(
      this->base_kvstore_driver()->BatchNestingDepth() +
      // Two extra nesting levels: presumably one for the shard index read
      // and one for the subsequent per-entry reads (see ReadOperationState's
      // use of `successor_batch_`) — TODO confirm.
      1 +
      1
  );
}
// Forward declaration; defined below.
class ReadOperationState;

// Batch entry that aggregates, per shard, all pending reads directed at that
// shard.  Each request carries the target EntryId and the generation
// conditions of the originating read.
using ReadOperationStateBase = internal_kvstore_batch::BatchReadEntry<
    ShardedKeyValueStore, internal_kvstore_batch::ReadRequest<
                              EntryId, kvstore::ReadGenerationConditions>>;

// Coordinates a batch of reads of one shard.  If the batch requests the full
// contents of every entry (with identical generation conditions), the whole
// shard is read in a single request; otherwise the shard index is resolved
// first (via the shard index cache), then per-entry byte-range reads are
// issued.
class ReadOperationState
    : public ReadOperationStateBase,
      public internal::AtomicReferenceCount<ReadOperationState> {
 public:
  explicit ReadOperationState(BatchEntryKey&& batch_entry_key_)
      : ReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<ReadOperationState>(
            // Initial reference; adopted in ProcessBatch.
            1) {}
 private:
  // Pinned shard index cache entry for this shard (unset when the entire
  // shard is read directly).
  internal::PinnedCacheEntry<ShardIndexCache> shard_index_cache_entry_;
  // Batch used for the per-entry reads that follow the shard index read.
  Batch successor_batch_{no_batch};

  // Entry point invoked by the batch machinery; hops to the executor.
  void Submit(Batch::View batch) override {
    const auto& executor = driver().executor();
    executor(
        [this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
  }

  // Dispatches either a full-shard read or a shard-index read followed by
  // per-entry reads.
  void ProcessBatch(Batch batch) {
    // Adopt the reference added at construction.
    internal::IntrusivePtr<ReadOperationState> self(this,
                                                    internal::adopt_object_ref);
    if (ShouldReadEntireShard()) {
      ReadEntireShard(std::move(self), std::move(batch));
      return;
    }
    shard_index_cache_entry_ =
        GetCacheEntry(driver().shard_index_cache(), std::string_view{});
    auto shard_index_read_future = shard_index_cache_entry_->Read(
        {this->request_batch.staleness_bound, batch});
    if (batch) {
      if (!shard_index_read_future.ready()) {
        // Shard index read is deferred; the entry reads that follow it must
        // use a fresh batch.
        successor_batch_ = Batch::New();
      } else {
        successor_batch_ = std::move(batch);
      }
    }
    std::move(shard_index_read_future)
        .ExecuteWhenReady(
            [self = std::move(self)](ReadyFuture<const void> future) mutable {
              const auto& executor = self->driver().executor();
              executor([self = std::move(self), status = future.status()] {
                if (!status.ok()) {
                  // Shard index read failed: fail every pending request.
                  internal_kvstore_batch::SetCommonResult<Request>(
                      self->request_batch.requests, {status});
                  return;
                }
                OnShardIndexReady(std::move(self));
              });
            });
  }

  // Returns true if every entry of the shard is requested in full with
  // identical generation conditions, in which case a single read of the
  // whole shard replaces the index read plus per-entry reads.
  bool ShouldReadEntireShard() {
    const int64_t num_entries_per_shard =
        driver().shard_index_params().num_entries;
    if (request_batch.requests.size() < num_entries_per_shard) {
      return false;
    }
    const auto& first_request = request_batch.requests[0];
    // Tracks which entry ids are requested in full (duplicates counted once).
    BitVec<> covered_entries(num_entries_per_shard);
    int64_t num_covered = 0;
    for (const auto& request : request_batch.requests) {
      if (std::get<kvstore::ReadGenerationConditions>(request) !=
          std::get<kvstore::ReadGenerationConditions>(first_request)) {
        // Conditions differ; a single shard read cannot satisfy all requests.
        return false;
      }
      if (std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
              .byte_range.IsFull()) {
        auto ref = covered_entries[std::get<EntryId>(request)];
        if (!ref) ++num_covered;
        ref = true;
      }
    }
    if (num_covered != num_entries_per_shard) {
      return false;
    }
    return true;
  }

  // Issues a single unconditional-byte-range read of the whole shard object.
  static void ReadEntireShard(internal::IntrusivePtr<ReadOperationState> self,
                              Batch batch) {
    auto& first_request = self->request_batch.requests[0];
    kvstore::ReadOptions read_options;
    read_options.batch = std::move(batch);
    // All requests share the same conditions (checked by
    // ShouldReadEntireShard), so the first request's conditions apply.
    read_options.generation_conditions =
        std::move(std::get<kvstore::ReadGenerationConditions>(first_request));
    read_options.staleness_bound = self->request_batch.staleness_bound;
    auto& driver = self->driver();
    driver.base_kvstore_driver()
        ->Read(driver.base_kvstore_path(), std::move(read_options))
        .ExecuteWhenReady([self = std::move(self)](
                              ReadyFuture<kvstore::ReadResult> future) mutable {
          const auto& executor = self->driver().executor();
          executor([self = std::move(self), future = std::move(future)] {
            OnFullShardReady(std::move(self), std::move(future.result()));
          });
        });
  }

  // Completes all requests from a full-shard read: decodes the shard index
  // out of the shard contents and slices each entry's bytes from the cord.
  static void OnFullShardReady(internal::IntrusivePtr<ReadOperationState> self,
                               Result<kvstore::ReadResult>&& result) {
    if (!result.ok() || !result->has_value()) {
      // Error or missing shard applies uniformly to every request.
      internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
                                              std::move(result));
      return;
    }
    auto& read_result = *result;
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto shard_index,
        DecodeShardIndexFromFullShard(read_result.value,
                                      self->driver().shard_index_params()),
        internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
                                                _));
    const auto complete_request = [&](Request& request) {
      auto& byte_range_request =
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
      const auto index_entry = shard_index[std::get<EntryId>(request)];
      if (index_entry.IsMissing()) {
        byte_range_request.promise.SetResult(
            kvstore::ReadResult::Missing(read_result.stamp));
        return;
      }
      TENSORSTORE_RETURN_IF_ERROR(
          index_entry.Validate(std::get<EntryId>(request),
                               read_result.value.size()),
          static_cast<void>(byte_range_request.promise.SetResult(_)));
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto validated_byte_range,
          byte_range_request.byte_range.Validate(index_entry.length),
          static_cast<void>(byte_range_request.promise.SetResult(_)));
      // Translate the entry-relative byte range to a shard-relative range.
      validated_byte_range.inclusive_min += index_entry.offset;
      validated_byte_range.exclusive_max += index_entry.offset;
      kvstore::ReadResult request_read_result;
      request_read_result.stamp = read_result.stamp;
      request_read_result.state = kvstore::ReadResult::kValue;
      request_read_result.value =
          internal::GetSubCord(read_result.value, validated_byte_range);
      byte_range_request.promise.SetResult(std::move(request_read_result));
    };
    for (auto& request : self->request_batch.requests) {
      complete_request(request);
    }
  }

  // Completes requests using the cached shard index: missing entries resolve
  // immediately; present entries issue byte-range reads of the base kvstore
  // conditioned on the shard generation observed in the index.
  static void OnShardIndexReady(
      internal::IntrusivePtr<ReadOperationState> self) {
    std::shared_ptr<const ShardIndex> shard_index;
    TimestampedStorageGeneration stamp;
    {
      // Snapshot the index and its generation under the cache read lock.
      auto lock = internal::AsyncCache::ReadLock<ShardIndexCache::ReadData>(
          *self->shard_index_cache_entry_);
      stamp = lock.stamp();
      shard_index = lock.shared_data();
    }
    assert(!StorageGeneration::IsUnknown(stamp.generation));
    if (!shard_index) {
      // Shard does not exist: every entry is missing.
      internal_kvstore_batch::SetCommonResult(
          self->request_batch.requests,
          kvstore::ReadResult::Missing(std::move(stamp)));
      return;
    }
    auto successor_batch = std::move(self->successor_batch_);
    if (successor_batch) {
      // Keep a fresh batch available for re-queued (aborted) reads.
      self->successor_batch_ = Batch::New();
    }
    const auto process_request = [&](Request& request) {
      ShardIndexEntry index_entry = ShardIndexEntry::Missing();
      kvstore::ReadResult::State state;
      if (!std::get<kvstore::ReadGenerationConditions>(request).Matches(
              stamp.generation)) {
        // Generation conditions rule out a read; report "unspecified".
        state = kvstore::ReadResult::kUnspecified;
      } else {
        index_entry = (*shard_index)[std::get<EntryId>(request)];
        state = kvstore::ReadResult::kMissing;
      }
      auto& byte_range_request =
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
      if (index_entry.IsMissing()) {
        byte_range_request.promise.SetResult(
            kvstore::ReadResult{state, {}, stamp});
        return;
      }
      TENSORSTORE_RETURN_IF_ERROR(
          index_entry.Validate(std::get<EntryId>(request)),
          static_cast<void>(byte_range_request.promise.SetResult(
              self->shard_index_cache_entry_->AnnotateError(
                  _,
                  true))));
      assert(byte_range_request.byte_range.SatisfiesInvariants());
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto validated_byte_range,
          byte_range_request.byte_range.Validate(index_entry.length),
          static_cast<void>(byte_range_request.promise.SetResult(_)));
      if (validated_byte_range.inclusive_min ==
          validated_byte_range.exclusive_max) {
        // Empty byte range: complete immediately with an empty value.
        byte_range_request.promise.SetResult(kvstore::ReadResult{
            kvstore::ReadResult::kValue, absl::Cord(), stamp});
        return;
      }
      kvstore::ReadOptions kvs_read_options;
      // Condition on the shard generation the index came from; a concurrent
      // shard rewrite aborts the read (handled in OnValueReady).
      kvs_read_options.generation_conditions.if_equal = stamp.generation;
      kvs_read_options.staleness_bound = self->request_batch.staleness_bound;
      kvs_read_options.batch = successor_batch;
      kvs_read_options.byte_range =
          ByteRange{static_cast<int64_t>(index_entry.offset +
                                         validated_byte_range.inclusive_min),
                    static_cast<int64_t>(index_entry.offset +
                                         validated_byte_range.exclusive_max)};
      self->driver()
          .base_kvstore_driver()
          ->Read(std::string(self->driver().base_kvstore_path()),
                 std::move(kvs_read_options))
          .ExecuteWhenReady([self, &request](ReadyFuture<kvstore::ReadResult>
                                                 future) mutable {
            const auto& status = future.status();
            if (!status.ok()) {
              std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
                  .promise.SetResult(status);
              return;
            }
            const auto& executor = self->driver().executor();
            executor([self = std::move(self), &request,
                      future = std::move(future)] {
              OnValueReady(std::move(self), request, std::move(future.value()));
            });
          });
    };
    for (auto& request : self->request_batch.requests) {
      process_request(request);
    }
  }

  // Handles completion of a single entry read; an aborted read (generation
  // mismatch) re-queues the request against a fresh shard-index read.
  static void OnValueReady(internal::IntrusivePtr<ReadOperationState> self,
                           Request& request, kvstore::ReadResult&& value) {
    if (value.aborted()) {
      MakeRequest<ReadOperationState>(self->driver(), self->successor_batch_,
                                      value.stamp.time, std::move(request));
      return;
    }
    std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
        .promise.SetResult(std::move(value));
  }
};
// Reads a single entry: validates the key, converts it to an entry id, and
// enqueues the request on the per-shard batch machinery (ReadOperationState),
// which resolves the shard index and issues the actual byte-range read.
Future<kvstore::ReadResult> ShardedKeyValueStore::Read(Key key,
                                                       ReadOptions options) {
  // Reject keys that do not name a valid grid entry.
  TENSORSTORE_ASSIGN_OR_RETURN(
      EntryId entry_id,
      KeyToEntryIdOrError(key, shard_index_params().grid_shape()));
  auto pair = PromiseFuturePair<kvstore::ReadResult>::Make();
  ReadOperationState::Request request{
      {std::move(pair.promise), options.byte_range},
      {entry_id},
      std::move(options.generation_conditions)};
  ReadOperationState::MakeRequest<ReadOperationState>(
      *this, options.batch, options.staleness_bound, std::move(request));
  return std::move(pair.future);
}
// Implements ListImpl: reads the shard index once and synthesizes a listing
// of all present entries within the requested key range.
struct ListOperationState
    : public internal::FlowSenderOperationState<kvstore::ListEntry> {
  using Base = internal::FlowSenderOperationState<kvstore::ListEntry>;
  using Base::Base;
  // Pinned shard index cache entry for the shard being listed.
  internal::PinnedCacheEntry<ShardIndexCache> shard_index_cache_entry_;
  // List options with the range already translated to internal keys.
  kvstore::ListOptions options_;
  // Kicks off the shard index read and registers OnShardIndexReady as the
  // continuation (skipped if the operation was cancelled in the meantime).
  static void Start(ShardedKeyValueStore& store, kvstore::ListOptions&& options,
                    ListReceiver&& receiver) {
    // Translate the user-visible key range into the internal key space.
    options.range = KeyRangeToInternalKeyRange(
        options.range, store.shard_index_params().grid_shape());
    auto self =
        internal::MakeIntrusivePtr<ListOperationState>(std::move(receiver));
    self->options_ = std::move(options);
    self->shard_index_cache_entry_ =
        GetCacheEntry(store.shard_index_cache(), std::string_view{});
    auto shard_index_read_future =
        self->shard_index_cache_entry_->Read({self->options_.staleness_bound});
    auto* self_ptr = self.get();
    LinkValue(
        WithExecutor(store.executor(),
                     [self = std::move(self)](Promise<void> promise,
                                              ReadyFuture<const void> future) {
                       if (self->cancelled()) return;
                       self->OnShardIndexReady();
                     }),
        self_ptr->promise, std::move(shard_index_read_future));
  }
  // Emits one ListEntry per present entry id within the requested range.
  void OnShardIndexReady() {
    auto shard_index =
        internal::AsyncCache::ReadLock<ShardIndex>(*shard_index_cache_entry_)
            .shared_data();
    if (!shard_index) {
      // Shard does not exist: nothing to list.
      return;
    }
    const auto& shard_index_params =
        GetOwningCache(*shard_index_cache_entry_).shard_index_params();
    span<const Index> grid_shape = shard_index_params.grid_shape();
    auto start_index = InternalKeyToEntryId(options_.range.inclusive_min);
    auto end_index = InternalKeyToEntryId(options_.range.exclusive_max);
    auto& receiver = shared_receiver->receiver;
    for (EntryId i = start_index; i < end_index; ++i) {
      auto index_entry = (*shard_index)[i];
      if (index_entry.IsMissing()) continue;
      auto key = EntryIdToKey(i, grid_shape);
      key.erase(0, options_.strip_prefix_length);
      execution::set_value(receiver,
                           ListEntry{
                               std::move(key),
                               ListEntry::checked_size(index_entry.length),
                           });
    }
  }
};
// Lists present entries by reading the shard index; see ListOperationState.
void ShardedKeyValueStore::ListImpl(ListOptions options,
                                    ListReceiver receiver) {
  ListOperationState::Start(*this, std::move(options), std::move(receiver));
}
// Non-transactional writes are routed through the transactional machinery,
// so they compose with other concurrent writes to the same shard.
Future<TimestampedStorageGeneration> ShardedKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  return internal_kvstore::WriteViaTransaction(
      this, std::move(key), std::move(value), std::move(options));
}
// Registers a read-modify-write operation on one entry with the shard's
// transactional write cache.  The key is validated and converted to the
// internal key form first.  If no transaction was supplied, the implicit
// transaction associated with the cache node is returned to the caller.
absl::Status ShardedKeyValueStore::ReadModifyWrite(
    internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
    ReadModifyWriteSource& source) {
  // Fails on keys that do not name a valid grid entry.
  TENSORSTORE_ASSIGN_OR_RETURN(
      EntryId entry_id,
      KeyToEntryIdOrError(key, shard_index_params().grid_shape()));
  key = EntryIdToInternalKey(entry_id);
  auto entry = GetCacheEntry(write_cache_, std::string_view{});
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto node, GetWriteLockedTransactionNode(*entry, transaction));
  node->ReadModifyWrite(phase, std::move(key), source);
  if (!transaction) {
    // Adopt the implicit transaction created for this node (unlock first).
    transaction.reset(node.unlock()->transaction());
  }
  return absl::OkStatus();
}
// Records a delete of a key range (translated to the internal key space) on
// the shard's transaction node within the supplied transaction.
absl::Status ShardedKeyValueStore::TransactionalDeleteRange(
    const internal::OpenTransactionPtr& transaction, KeyRange range) {
  range = KeyRangeToInternalKeyRange(range, shard_index_params().grid_shape());
  auto entry = GetCacheEntry(write_cache_, std::string_view{});
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto node, GetWriteLockedTransactionNode(*entry, transaction));
  node->DeleteRange(std::move(range));
  return absl::OkStatus();
}
// Non-transactional DeleteRange: performs the delete within a new implicit
// transaction and returns the future of that transaction's completion.
Future<const void> ShardedKeyValueStore::DeleteRange(KeyRange range) {
  range = KeyRangeToInternalKeyRange(range, shard_index_params().grid_shape());
  internal::OpenTransactionPtr transaction;
  auto entry = GetCacheEntry(write_cache_, std::string_view{});
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto node, GetWriteLockedTransactionNode(*entry, transaction));
  node->DeleteRange(std::move(range));
  return node->transaction()->future();
}
// Describes the entry within the shard grid, then appends the description of
// the shard object in the base kvstore.
std::string ShardedKeyValueStore::DescribeKey(std::string_view key) {
  std::string entry_description = zarr3_sharding_indexed::DescribeKey(
      key, shard_index_params().grid_shape());
  std::string base_description =
      base_kvstore_driver()->DescribeKey(base_kvstore_path());
  return tensorstore::StrCat(entry_description, " in ", base_description);
}
// Supported features are those the base kvstore reports for the single shard
// key, since all entries live in one underlying object.
kvstore::SupportedFeatures ShardedKeyValueStore::GetSupportedFeatures(
    const KeyRange& key_range) const {
  return base_kvstore_driver()->GetSupportedFeatures(
      KeyRange::Singleton(base_kvstore_path()));
}
// Returns the base kvstore (at the shard path) bound to `transaction`.
Result<KvStore> ShardedKeyValueStore::GetBase(
    std::string_view path, const Transaction& transaction) const {
  return KvStore(kvstore::DriverPtr(base_kvstore_driver()), base_kvstore_path(),
                 transaction);
}
}
}
namespace garbage_collection {
// Garbage-collection support: the sharded driver only references the base
// kvstore driver, so visitation is delegated to it.
template <>
struct GarbageCollection<zarr3_sharding_indexed::ShardedKeyValueStore> {
  static void Visit(GarbageCollectionVisitor& visitor,
                    const zarr3_sharding_indexed::ShardedKeyValueStore& value) {
    garbage_collection::GarbageCollectionVisit(visitor,
                                               *value.base_kvstore_driver());
  }
};
}  // namespace garbage_collection
namespace zarr3_sharding_indexed {
// Reconstructs the driver spec.  Only possible when the driver was opened
// from a spec (`data_for_spec_` set); drivers created directly via
// GetShardedKeyValueStore() cannot report one.
absl::Status ShardedKeyValueStore::GetBoundSpecData(
    ShardedKeyValueStoreSpecData& spec) const {
  if (!data_for_spec_) {
    return absl::UnimplementedError("");
  }
  TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver,
                               base_kvstore_driver()->GetBoundSpec());
  spec.base.path = base_kvstore_path();
  spec.data_copy_concurrency = data_for_spec_->data_copy_concurrency_resource;
  spec.cache_pool = data_for_spec_->cache_pool_resource;
  spec.index_codecs = data_for_spec_->index_codecs;
  const auto& shard_index_params = this->shard_index_params();
  spec.index_location = shard_index_params.index_location;
  // The last dimension of `index_shape` is dropped to recover the grid shape.
  spec.grid_shape.assign(shard_index_params.index_shape.begin(),
                         shard_index_params.index_shape.end() - 1);
  return absl::OkStatus();
}
// Opens the base kvstore, validates the index codec/grid parameters, and
// constructs the driver.  A cache key derived from the base kvstore and the
// spec parameters ensures that equivalent specs share caches.
Future<kvstore::DriverPtr> ShardedKeyValueStoreSpec::DoOpen() const {
  ShardIndexParameters index_params;
  index_params.index_location = data_.index_location;
  // Fails on invalid codec chains or grid shapes before the base is opened.
  TENSORSTORE_RETURN_IF_ERROR(
      index_params.Initialize(data_.index_codecs, data_.grid_shape));
  return MapFutureValue(
      InlineExecutor{},
      [spec = internal::IntrusivePtr<const ShardedKeyValueStoreSpec>(this),
       index_params =
           std::move(index_params)](kvstore::KvStore& base_kvstore) mutable
      -> Result<kvstore::DriverPtr> {
        std::string cache_key;
        internal::EncodeCacheKey(
            &cache_key, base_kvstore.driver, base_kvstore.path,
            spec->data_.data_copy_concurrency, spec->data_.grid_shape,
            spec->data_.index_codecs);
        ShardedKeyValueStoreParameters params;
        params.base_kvstore = std::move(base_kvstore.driver);
        params.base_kvstore_path = std::move(base_kvstore.path);
        params.executor = spec->data_.data_copy_concurrency->executor;
        params.cache_pool = *spec->data_.cache_pool;
        params.index_params = std::move(index_params);
        auto driver = internal::MakeIntrusivePtr<ShardedKeyValueStore>(
            std::move(params), cache_key);
        // Retain spec resources so GetBoundSpecData can reconstruct the spec.
        driver->data_for_spec_.reset(new ShardedKeyValueStore::DataForSpec{
            spec->data_.cache_pool,
            spec->data_.data_copy_concurrency,
            spec->data_.index_codecs,
        });
        return driver;
      },
      kvstore::Open(data_.base));
}
// Constructs a sharded driver directly from parameters (no spec); ownership
// of the new driver is transferred to the returned DriverPtr.
kvstore::DriverPtr GetShardedKeyValueStore(
    ShardedKeyValueStoreParameters&& parameters) {
  auto* driver = new ShardedKeyValueStore(std::move(parameters));
  return kvstore::DriverPtr(driver);
}
}
}
namespace {
// Registers the driver so it can be opened from a JSON spec by its id.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreSpec>
    registration;
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/write.h"
#include "riegeli/digests/crc32c_digester.h"
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/riegeli/digest_suffixed_writer.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Executor;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::GetShardedKeyValueStore;
using ::tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreParameters;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
// Builds an absl::Cord holding exactly the given byte values.
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
  std::string buffer(x.begin(), x.end());
  return absl::Cord(std::move(buffer));
}
// Returns `input` with a little-endian CRC-32C checksum of its contents
// appended, matching the framing produced by the "crc32c" codec.
absl::Cord WithCrc32c(absl::Cord input) {
  absl::Cord output;
  riegeli::CordWriter writer{&output};
  TENSORSTORE_CHECK_OK(riegeli::Write(
      input, tensorstore::internal::DigestSuffixedWriter<
                 riegeli::Crc32cDigester,
                 tensorstore::internal::LittleEndianDigestWriter>{&writer}));
  ABSL_CHECK(writer.Close());
  return output;
}
class GetKey {
public:
GetKey(bool sequential, std::vector<Index> grid_shape)
: sequential_(sequential),
grid_shape_(std::move(grid_shape)),
num_entries_(
tensorstore::ProductOfExtents(span<const Index>(grid_shape_))) {}
std::string operator()(std::string key) const {
auto it = key_to_entry_id_.find(key);
if (it == key_to_entry_id_.end()) {
ABSL_CHECK_LT(entry_id_to_key_.size(), num_entries_);
while (true) {
auto x = sequential_ ? next_entry_id_++ : absl::Uniform<EntryId>(gen_);
x = x % num_entries_;
if (entry_id_to_key_.emplace(x, key).second) {
it = key_to_entry_id_.emplace(key, x).first;
break;
}
}
}
return EntryIdToKey(it->second, grid_shape_);
}
private:
bool sequential_;
std::vector<Index> grid_shape_;
EntryId num_entries_;
mutable EntryId next_entry_id_ = 0;
mutable absl::BitGen gen_;
mutable absl::flat_hash_map<std::string, EntryId> key_to_entry_id_;
mutable absl::flat_hash_map<EntryId, std::string> entry_id_to_key_;
};
// Constructs a sharded store over `base_kvstore` at `base_kvstore_path`,
// using a little-endian "bytes" codec followed by "crc32c" for the shard
// index, with the index stored at the end of the shard.
kvstore::DriverPtr GetDefaultStore(kvstore::DriverPtr base_kvstore,
                                   std::string base_kvstore_path,
                                   Executor executor,
                                   CachePool::StrongPtr cache_pool,
                                   const std::vector<Index>& grid_shape) {
  ShardedKeyValueStoreParameters params;
  params.base_kvstore = base_kvstore;
  params.base_kvstore_path = base_kvstore_path;
  params.executor = executor;
  params.cache_pool = CachePool::WeakPtr(cache_pool);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(
      auto index_codecs,
      ZarrCodecChainSpec::FromJson(
          {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
           {{"name", "crc32c"}}}));
  params.index_params.index_location = ShardIndexLocation::kEnd;
  TENSORSTORE_CHECK_OK(
      params.index_params.Initialize(index_codecs, grid_shape));
  return GetShardedKeyValueStore(std::move(params));
}
// Runs the generic kvstore read/write conformance tests against the sharded
// store, for both inline and thread-pool executors and for both sequential
// and random key-to-entry-id assignment.
TEST(ShardedKeyValueStoreTest, BasicFunctionality) {
  std::vector<std::pair<std::string, tensorstore::Executor>> executors{
      {"inline", tensorstore::InlineExecutor{}},
      {"thread_pool", tensorstore::internal::DetachedThreadPool(2)}};
  for (const auto& [executor_name, executor] : executors) {
    for (const auto sequential_ids : {true, false}) {
      auto cache_pool = CachePool::Make(kSmallCacheLimits);
      auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
      const int64_t num_entries = 100;
      SCOPED_TRACE(executor_name);
      auto store = GetDefaultStore(base_kv_store, "shard_path", executor,
                                   cache_pool, {num_entries});
      GetKey get_key_fn(sequential_ids, {num_entries});
      tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
    }
  }
}
// Verifies that DescribeKey renders the entry id, total entry count, and the
// base kvstore path of the shard.
TEST(ShardedKeyValueStoreTest, DescribeKey) {
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
  int64_t num_entries = 100;
  std::vector<Index> grid_shape{num_entries};
  kvstore::DriverPtr store =
      GetDefaultStore(base_kv_store, "shard_path",
                      tensorstore::InlineExecutor{}, cache_pool, grid_shape);
  for (const auto& [key, description] :
       std::vector<std::pair<uint32_t, std::string>>{
           {0, "shard entry {0}/{100} in \"shard_path\""},
           {1, "shard entry {1}/{100} in \"shard_path\""},
       }) {
    EXPECT_EQ(description, store->DescribeKey(EntryIdToKey(key, grid_shape)));
  }
}
// Fixture providing a sharded store over an in-memory base kvstore, for
// tests that inspect the raw encoded shard via `base_kv_store`.
class RawEncodingTest : public ::testing::Test {
 protected:
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
  // Creates a sharded store at "shard_path" with the given grid shape.
  kvstore::DriverPtr GetStore(const std::vector<Index>& grid_shape) {
    return GetDefaultStore(base_kv_store, "shard_path",
                           tensorstore::InlineExecutor{}, cache_pool,
                           grid_shape);
  }
};
// Multiple unconditional writes to the same key within one transaction:
// exactly one write takes effect (its generation matches the committed
// shard); the superseded writes report StorageGeneration::Invalid().
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
                                 absl::Cord("efgh")};
  std::vector<Future<TimestampedStorageGeneration>> futures;
  auto key = EntryIdToKey(10, grid_shape);
  tensorstore::Transaction txn(tensorstore::isolated);
  for (auto value : values) {
    futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
  }
  txn.CommitAsync().IgnoreFuture();
  std::vector<Result<TimestampedStorageGeneration>> results;
  for (const auto& future : futures) {
    results.push_back(future.result());
  }
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
                                   base_kv_store->Read("shard_path").result());
  // One result matches the committed shard generation; the rest are Invalid.
  EXPECT_THAT(
      results,
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
  // The surviving write's value must be readable under its generation.
  for (size_t i = 0; i < results.size(); ++i) {
    if (results[i] && results[i]->generation == shard_read.stamp.generation) {
      EXPECT_THAT(store->Read(key).result(),
                  MatchesKvsReadResult(values[i], results[i]->generation));
    }
  }
}
// Writes several entries, then verifies that listing the store returns
// exactly those keys with their values.
TEST_F(RawEncodingTest, List) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  std::map<std::string, absl::Cord> values{
      {EntryIdToKey(1, grid_shape), absl::Cord("a")},
      {EntryIdToKey(2, grid_shape), absl::Cord("bc")},
      {EntryIdToKey(3, grid_shape), absl::Cord("def")},
      {EntryIdToKey(10, grid_shape), absl::Cord("xyz")}};
  for (auto [key, value] : values) {
    TENSORSTORE_EXPECT_OK(store->Write(key, value));
  }
  EXPECT_THAT(tensorstore::internal::GetMap(store),
              ::testing::Optional(::testing::ElementsAreArray(values)));
}
// Exercises conditional writes and deletes committed in a single
// transaction, checking which conditions succeed against the generations
// produced by an initial set of writes.
TEST_F(RawEncodingTest, WritesAndDeletes) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  StorageGeneration gen1, gen2, gen3;
  {
    // Populate entries 1..3 and record their committed generations.
    tensorstore::Transaction txn(tensorstore::isolated);
    auto init_future1 = kvstore::WriteCommitted(
        KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a"));
    auto init_future2 = kvstore::WriteCommitted(
        KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("bc"));
    auto init_future3 = kvstore::WriteCommitted(
        KvStore{store, txn}, EntryIdToKey(3, grid_shape), absl::Cord("def"));
    txn.CommitAsync().IgnoreFuture();
    gen1 = init_future1.value().generation;
    gen2 = init_future2.value().generation;
    gen3 = init_future3.value().generation;
  }
  tensorstore::Transaction txn(tensorstore::isolated);
  // Delete conditioned on entry 1 being absent: must fail (it exists).
  auto future1 =
      kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
                               {StorageGeneration::NoValue()});
  // Two competing conditional writes to entry 2; only one can match gen2.
  auto future2 =
      kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
                              absl::Cord("ww"), {gen2});
  auto future3 =
      kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
                              absl::Cord("xx"), {gen2});
  // Write conditioned on entry 4 being absent: must succeed.
  auto future4 =
      kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(4, grid_shape),
                              absl::Cord("zz"), {StorageGeneration::NoValue()});
  // Delete of entry 3 conditioned on its current generation: must succeed.
  auto future5 = kvstore::DeleteCommitted(KvStore{store, txn},
                                          EntryIdToKey(3, grid_shape), {gen3});
  txn.CommitAsync().IgnoreFuture();
  EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
                                    StorageGeneration::Unknown()));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
                                   base_kv_store->Read("shard_path").result());
  // Exactly one of the competing writes matches the committed generation.
  EXPECT_THAT(
      std::vector({future2.result(), future3.result()}),
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
          MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
  EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
              MatchesKvsReadResult(absl::Cord("a")));
  EXPECT_THAT(store->Read(EntryIdToKey(2, grid_shape)).result(),
              MatchesKvsReadResult(
                  !StorageGeneration::IsUnknown(future2.result()->generation)
                      ? absl::Cord("ww")
                      : absl::Cord("xx")));
  EXPECT_THAT(store->Read(EntryIdToKey(3, grid_shape)).result(),
              MatchesKvsReadResultNotFound());
  EXPECT_THAT(store->Read(EntryIdToKey(4, grid_shape)).result(),
              MatchesKvsReadResult(absl::Cord("zz")));
}
// Runs two write/delete operations in both issue orders (op0-then-op1 and
// op1-then-op0), calling `init` before each round and `finalize` (typically a
// transaction commit) afterwards.  Returns the per-round results, always
// ordered as {op0 result, op1 result}, so callers can assert on outcomes that
// depend on operation ordering within a transaction.
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
    std::function<void()> init,
    std::function<Future<TimestampedStorageGeneration>()> op0,
    std::function<Future<TimestampedStorageGeneration>()> op1,
    std::function<void()> finalize) {
  std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
  for (int round = 0; round < 2; ++round) {
    init();
    Future<TimestampedStorageGeneration> future0;
    Future<TimestampedStorageGeneration> future1;
    if (round == 0) {
      future0 = op0();
      future1 = op1();
    } else {
      future1 = op1();
      future0 = op0();
    }
    finalize();
    // Result order is fixed as {op0, op1} regardless of issue order.
    all_results.push_back({future0.result(), future1.result()});
  }
  return all_results;
}
// Basic write -> read -> delete -> read-not-found round trip for one entry.
TEST_F(RawEncodingTest, WriteThenDelete) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  TENSORSTORE_ASSERT_OK(
      store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")).result());
  EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
              MatchesKvsReadResult(absl::Cord("a")));
  TENSORSTORE_ASSERT_OK(store->Delete(EntryIdToKey(1, grid_shape)).result());
  // After the delete, the key must no longer be present.
  EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
              MatchesKvsReadResultNotFound());
}
// Two conditional deletes of the same existing key within one transaction,
// issued in both orders via TestOrderDependentWrites.  Whichever delete runs
// first succeeds; the second sees the key already gone (or the condition
// no longer matching) and reports Invalid/NoValue/Unknown accordingly.
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  StorageGeneration gen;
  tensorstore::Transaction txn{tensorstore::no_transaction};
  EXPECT_THAT(
      TestOrderDependentWrites(
          /*init=*/
          [&] {
            // Re-create the entry and a fresh transaction for each round.
            gen = store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a"))
                      .value()
                      .generation;
            txn = tensorstore::Transaction(tensorstore::isolated);
          },
          /*op0=*/
          [&] {
            // Delete conditioned on the entry's current generation.
            return kvstore::DeleteCommitted(KvStore{store, txn},
                                            EntryIdToKey(1, grid_shape),
                                            {gen});
          },
          /*op1=*/
          [&] {
            // Delete conditioned on the entry being absent.
            return kvstore::DeleteCommitted(
                KvStore{store, txn}, EntryIdToKey(1, grid_shape),
                {StorageGeneration::NoValue()});
          },
          /*finalize=*/[&] { txn.CommitAsync().IgnoreFuture(); }),
      // Round 1 (op0 first): op0 succeeds (Invalid marks the delete), then
      // op1's NoValue condition holds.  Round 2 (op1 first): op1's NoValue
      // condition fails against the live entry... results differ per order.
      ::testing::UnorderedElementsAre(
          ::testing::ElementsAre(
              MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::NoValue())),
          ::testing::ElementsAre(
              MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::Unknown()))));
}
// After deleting an entry, an unconditional write followed (in either order)
// by a write conditioned on a generation that cannot match: the unconditional
// write always succeeds and the mismatched conditional write always fails
// with Unknown, regardless of issue order.
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  tensorstore::Transaction txn{tensorstore::no_transaction};
  EXPECT_THAT(
      TestOrderDependentWrites(
          /*init=*/
          [&] {
            store->Delete(EntryIdToKey(0, grid_shape)).value();
            txn = tensorstore::Transaction(tensorstore::isolated);
          },
          /*op0=*/
          [&] {
            // Unconditional write.
            return kvstore::WriteCommitted(KvStore{store, txn},
                                           EntryIdToKey(0, grid_shape),
                                           absl::Cord("a"));
          },
          /*op1=*/
          [&] {
            // Conditioned on generation "g", which never matches.
            return kvstore::WriteCommitted(
                KvStore{store, txn}, EntryIdToKey(0, grid_shape),
                absl::Cord("b"),
                {StorageGeneration::FromString("g")});
          },
          /*finalize=*/[&] { txn.CommitAsync().IgnoreFuture(); }),
      // Both orders produce the same outcome: op0 gets a real generation
      // (neither NoValue nor Invalid), op1 reports Unknown.
      ::testing::Each(::testing::ElementsAre(
          MatchesTimestampedStorageGeneration(
              ::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
                               ::testing::Not(StorageGeneration::Invalid()))),
          MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
// Two deletes of a non-existent key, both conditioned on NoValue, in one
// transaction: one reports NoValue (condition satisfied) and the other
// Invalid; order of the two results is unspecified.
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  tensorstore::Transaction txn(tensorstore::isolated);
  std::vector futures{
      kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
                               {StorageGeneration::NoValue()}),
      kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
                               {StorageGeneration::NoValue()})};
  txn.CommitAsync().IgnoreFuture();
  std::vector results{futures[0].result(), futures[1].result()};
  EXPECT_THAT(
      results,
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
// A shard file shorter than the shard index (100 entries * 16 bytes + 4-byte
// checksum = 1604 bytes) must produce clear errors on both read and write.
TEST_F(RawEncodingTest, ShardIndexTooShort) {
  std::vector<Index> grid_shape{100};
  kvstore::DriverPtr store = GetStore(grid_shape);
  // Write a bogus 3-byte shard directly to the underlying store.
  base_kv_store->Write("shard_path", Bytes({1, 2, 3})).value();
  // Reading requests the index as a suffix byte range, which cannot be
  // satisfied by a 3-byte value.
  EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
              MatchesStatus(
                  absl::StatusCode::kFailedPrecondition,
                  RE2::QuoteMeta("Error reading shard index in \"shard_path\": "
                                 "Requested byte range [-1604, ?) is not valid "
                                 "for value of size 3")));
  // Writing must also fail: the existing shard is too small to contain a
  // valid index.
  EXPECT_THAT(
      store->Write(EntryIdToKey(10, grid_shape), absl::Cord("abc")).result(),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error reading \"shard_path\": "
                    "Existing shard has size of 3 bytes, but expected at least "
                    "1604 bytes"));
}
// A shard index entry whose offset+length would overflow (huge little-endian
// offset/length values) must be rejected as data loss rather than causing
// arithmetic overflow.
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
  std::vector<Index> grid_shape{2};
  kvstore::DriverPtr store = GetStore(grid_shape);
  // Index for 2 entries: entry 0 is "missing" (all 0xff), entry 1 has
  // offset and length both near INT64_MAX, which overflows when summed.
  auto content = WithCrc32c(Bytes({
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  }));
  TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
  EXPECT_THAT(
      store->Read(EntryIdToKey(1, grid_shape)).result(),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error reading shard index in \"shard_path\": "
                    "Invalid shard index entry 1 with offset=.*, length=.*"));
}
// A shard index entry whose byte range extends past the end of the shard
// (offset=0, length=37 in a shard of only index+checksum bytes) must be
// detected as data loss when the shard is read back for a write.
TEST_F(RawEncodingTest, ShardIndexEntryByteRangeOutOfRange) {
  std::vector<Index> grid_shape{2};
  kvstore::DriverPtr store = GetStore(grid_shape);
  // Entry 0 missing (all 0xff); entry 1 claims bytes [0, 37), which exceeds
  // the actual shard size.
  auto content = WithCrc32c(Bytes({
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0, 0, 0, 0, 0, 0, 0, 0,
      37, 0, 0, 0, 0, 0, 0, 0,
  }));
  TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
  EXPECT_THAT(
      store->Write(EntryIdToKey(1, grid_shape), absl::Cord("x")).result(),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error reading \"shard_path\": "
                    "Shard index entry 1 with byte range .* is invalid "
                    "for shard of size .*"));
}
// A shard index with a corrupt CRC-32C trailer ("abcd" instead of the real
// checksum) must fail with a digest-mismatch data-loss error.
TEST_F(RawEncodingTest, ShardIndexInvalidChecksum) {
  std::vector<Index> grid_shape{2};
  kvstore::DriverPtr store = GetStore(grid_shape);
  // Note: deliberately NOT wrapped in WithCrc32c.
  auto content = Bytes({
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
      0, 0, 0, 0, 0, 0, 0, 0,
      5, 0, 0, 0, 0, 0, 0, 0,
  });
  content.Append("abcd");  // Bogus 4-byte checksum.
  TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
  EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
              MatchesStatus(absl::StatusCode::kDataLoss,
                            "Error reading shard index in \"shard_path\": "
                            "Digest mismatch.*"));
}
// Fixture that backs the sharded store with a MockKeyValueStore so tests can
// observe and answer the exact read/write requests issued to the underlying
// store.  Default grid shape is 5 entries; tests may rebuild `store` with a
// different shape via GetStore().
class UnderlyingKeyValueStoreTest : public ::testing::Test {
 protected:
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
  // Builds a sharded store over `mock_store` with the given grid shape.
  kvstore::DriverPtr GetStore(std::vector<Index> grid_shape) {
    return GetDefaultStore(mock_store, "shard_path",
                           tensorstore::InlineExecutor{}, cache_pool,
                           grid_shape);
  }
  std::vector<Index> grid_shape{5};
  kvstore::DriverPtr store = GetStore(grid_shape);
};
// End-to-end read flow against the mock underlying store, covering: initial
// shard-index fetch + data fetch, cached index hits, conditional
// (if_not_equal) index revalidation, and retry after the generation changes
// mid-read.  Each scoped section answers exactly one underlying request.
TEST_F(UnderlyingKeyValueStoreTest, Read) {
  absl::Time init_time = UniqueNow();
  absl::Time shard_index_time;
  {
    // First read of entry 2: triggers a shard-index request (suffix range of
    // 5 entries * 16 bytes + 4-byte checksum), then a data request.
    auto future = store->Read(EntryIdToKey(2, grid_shape), {});
    {
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
                req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
      shard_index_time = absl::Now();
      // Index: only entry 2 present, at offset 10, length 5.
      req.promise.SetResult(
          ReadResult{ReadResult::kValue,
                     WithCrc32c(Bytes({
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         10, 0, 0, 0, 0, 0, 0, 0,
                         5, 0, 0, 0, 0, 0, 0, 0,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                     })),
                     {StorageGeneration::FromString("g0"), shard_index_time}});
    }
    ASSERT_FALSE(future.ready()) << future.status();
    absl::Time read_time;
    {
      // Data request for bytes [10, 15), conditioned on the shard still being
      // at generation "g0".
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult{ReadResult::kValue,
                     Bytes({5, 6, 7, 8, 9}),
                     {StorageGeneration::FromString("g0"), read_time}});
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g0"), read_time));
  }
  {
    // Read of a missing entry within the staleness bound: answered entirely
    // from the cached shard index, with no underlying request.
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(EntryIdToKey(3, grid_shape), options);
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(future.result(),
                MatchesKvsReadResultNotFound(shard_index_time));
  }
  {
    // Fresh read of the missing entry: revalidates the cached index with
    // if_not_equal="g0"; the mock answers "unchanged" (Unspecified).
    auto req_time = UniqueNow();
    auto future = store->Read(EntryIdToKey(3, grid_shape), {});
    {
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
                req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
      shard_index_time = absl::Now();
      req.promise.SetResult(ReadResult::Unspecified(
          {StorageGeneration::FromString("g0"), shard_index_time}));
    }
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(future.result(),
                MatchesKvsReadResultNotFound(shard_index_time));
  }
  {
    // Read of entry 2 within the staleness bound: index is cached, so only
    // the data request is issued.
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(EntryIdToKey(2, grid_shape), options);
    absl::Time read_time;
    {
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
      EXPECT_EQ(init_time, req.options.staleness_bound);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult{ReadResult::kValue,
                     Bytes({5, 6, 7, 8, 9}),
                     {StorageGeneration::FromString("g0"), read_time}});
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g0"), read_time));
  }
  {
    // Generation change mid-read: the conditional data request aborts
    // (Unspecified), forcing a re-read of the index ("g1") and then a new
    // data request for the updated byte range [10, 16).
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(EntryIdToKey(2, grid_shape), options);
    absl::Time abort_time;
    {
      // Conditional data read aborts because if_equal="g0" no longer holds.
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(init_time, req.options.staleness_bound);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
      abort_time = absl::Now();
      req.promise.SetResult(ReadResult::Unspecified(
          {StorageGeneration::FromString("g0"), abort_time}));
    }
    {
      // Index is re-fetched; the new index ("g1") says entry 2 now has
      // offset 10, length 6.
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
                req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
      shard_index_time = absl::Now();
      req.promise.SetResult(
          ReadResult{ReadResult::kValue,
                     WithCrc32c(Bytes({
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         10, 0, 0, 0, 0, 0, 0, 0,
                         6, 0, 0, 0, 0, 0, 0, 0,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                     })),
                     {StorageGeneration::FromString("g1"), shard_index_time}});
    }
    absl::Time read_time;
    {
      // Data request against the new generation/byte range.
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("shard_path", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g1"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(10, 16), req.options.byte_range);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult{ReadResult::kValue,
                     Bytes({4, 5, 6, 7, 8, 9}),
                     {StorageGeneration::FromString("g1"), read_time}});
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({4, 5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g1"), read_time));
  }
}
// A transactional read of a missing entry followed by a commit: the read
// issues exactly one underlying request, and committing a transaction with
// no writes issues no further requests.
TEST_F(UnderlyingKeyValueStoreTest, TransactionReadThenCommit) {
  tensorstore::Transaction txn(tensorstore::isolated);
  auto memory_store = tensorstore::GetMemoryKeyValueStore();
  {
    auto future =
        kvstore::Read(KvStore{store, txn}, EntryIdToKey(2, grid_shape), {});
    {
      // Forward the single underlying read to an empty memory store.
      auto req = mock_store->read_requests.pop();
      req(memory_store);
      ASSERT_EQ(0, mock_store->read_requests.size());
    }
    EXPECT_THAT(future.result(),
                ::testing::Optional(MatchesKvsReadResultNotFound()));
  }
  auto commit_future = txn.CommitAsync();
  TENSORSTORE_ASSERT_OK(commit_future.result());
  // Read-only commit must not touch the underlying store again.
  EXPECT_EQ(0, mock_store->read_requests.size());
}
// If the shard is deleted between the index fetch and the data fetch, the
// conditional data read observes NoValue and the overall read resolves to
// not-found rather than retrying indefinitely.
TEST_F(UnderlyingKeyValueStoreTest,
       ReadConcurrentDeleteAfterReadingMinishardIndex) {
  auto req_time = UniqueNow();
  auto future = store->Read(EntryIdToKey(2, grid_shape), {});
  {
    // Answer the shard-index request: entry 2 at offset 10, length 5.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
    EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
              req.options.byte_range);
    req.promise.SetResult(
        ReadResult{ReadResult::kValue,
                   WithCrc32c(Bytes({
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       10, 0, 0, 0, 0, 0, 0, 0,
                       5, 0, 0, 0, 0, 0, 0, 0,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                   })),
                   {StorageGeneration::FromString("g0"), absl::Now()}});
  }
  absl::Time read_time;
  {
    // Data request: simulate the shard having been deleted concurrently.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g0"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
    read_time = absl::Now();
    req.promise.SetResult(ReadResult{
        ReadResult::kMissing, {}, {StorageGeneration::NoValue(), read_time}});
  }
  ASSERT_EQ(0, mock_store->read_requests.size());
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
// An underlying-store error while fetching the shard index is propagated to
// the caller with the "Error reading shard index" context attached.
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingShardIndex) {
  auto future = store->Read(EntryIdToKey(2, grid_shape), {});
  {
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
              req.options.byte_range);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kUnknown,
                            "Error reading shard index in \"shard_path\": "
                            "Read error"));
}
// An underlying-store error on the data request (after a successful index
// fetch) is propagated to the caller unchanged.
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingData) {
  auto future = store->Read(EntryIdToKey(0x2, grid_shape), {});
  {
    // Answer the shard-index request: entry 2 at offset 10, length 5.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
              req.options.byte_range);
    req.promise.SetResult(
        ReadResult{ReadResult::kValue,
                   WithCrc32c(Bytes({
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       10, 0, 0, 0, 0, 0, 0, 0,
                       5, 0, 0, 0, 0, 0, 0, 0,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                   })),
                   {StorageGeneration::FromString("g0"), absl::Now()}});
  }
  ASSERT_FALSE(future.ready()) << future.status();
  {
    // Fail the data request.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kUnknown, "Read error"));
}
// Reading a key that is not a valid entry-id encoding fails immediately with
// InvalidArgument, without contacting the underlying store.
TEST_F(UnderlyingKeyValueStoreTest, ReadInvalidKey) {
  auto future = store->Read("abc", {});
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Writing to an invalid key fails immediately with InvalidArgument.
TEST_F(UnderlyingKeyValueStoreTest, WriteInvalidKey) {
  auto future = store->Write("abc", absl::Cord("x"));
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Deleting an invalid key fails immediately with InvalidArgument.
TEST_F(UnderlyingKeyValueStoreTest, DeleteInvalidKey) {
  auto future = store->Delete("abc");
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Writing one entry when no shard exists yet: the driver first reads the
// (missing) shard, then writes a complete new shard conditioned on
// if_equal=NoValue, containing the entry data followed by the index.
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShard) {
  grid_shape = {2};
  store = GetStore(grid_shape);
  auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
  {
    // Answer the read-modify-write's initial read: shard missing.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    req.promise.SetResult(ReadResult{
        ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
  }
  absl::Time write_time;
  {
    auto req = mock_store->write_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->write_requests.size());
    EXPECT_EQ("shard_path", req.key);
    // The write is conditional on the shard still not existing.
    EXPECT_EQ(StorageGeneration::NoValue(),
              req.options.generation_conditions.if_equal);
    // Expected shard layout: entry-1 data bytes, then the 2-entry index
    // (entry 0 missing, entry 1 at offset 0 length 3) plus CRC-32C.
    auto expected = Bytes({
        1, 2, 3,
    });
    expected.Append(WithCrc32c(Bytes({
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
    })));
    EXPECT_THAT(req.value, ::testing::Optional(expected));
    write_time = absl::Now();
    req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
                          write_time);
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::FromString("g0"), write_time));
}
// Two unconditional writes filling every entry of the shard in one
// transaction: since the whole shard is overwritten, no read of the existing
// shard is needed and a single unconditional write is issued.
TEST_F(UnderlyingKeyValueStoreTest, UnconditionalWrite) {
  grid_shape = {2};
  store = GetStore(grid_shape);
  auto txn = Transaction(tensorstore::isolated);
  auto future1 = kvstore::WriteCommitted(
      KvStore{store, txn}, EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}));
  auto future2 = kvstore::WriteCommitted(
      KvStore{store, txn}, EntryIdToKey(1, grid_shape), Bytes({4, 5, 6}));
  // Nothing is issued until commit.
  ASSERT_EQ(0, mock_store->read_requests.size());
  ASSERT_EQ(0, mock_store->write_requests.size());
  txn.CommitAsync().IgnoreFuture();
  // Both entries are written, so the existing shard need not be read.
  ASSERT_EQ(0, mock_store->read_requests.size());
  absl::Time write_time;
  {
    auto req = mock_store->write_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->write_requests.size());
    EXPECT_EQ("shard_path", req.key);
    // Unconditional: no if_equal constraint.
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    // Shard: entry 0 then entry 1, followed by the index
    // (entry 0: offset 0 len 3; entry 1: offset 3 len 3) plus CRC-32C.
    auto expected = Bytes({
        1, 2, 3,
        4, 5, 6,
    });
    expected.Append(WithCrc32c(Bytes({
        0, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
    })));
    EXPECT_THAT(req.value, ::testing::Optional(expected));
    write_time = absl::Now();
    req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
                          write_time);
  }
  ASSERT_TRUE(future1.ready());
  ASSERT_TRUE(future2.ready());
  // Both writes share the single shard write's generation and timestamp.
  EXPECT_THAT(future1.result(),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::FromString("g0"), write_time));
  EXPECT_THAT(future2.result(),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::FromString("g0"), write_time));
}
// Even when the write covers the full shard capacity, a write conditioned on
// NoValue must still read the existing shard and issue a conditional
// (if_equal=NoValue) underlying write rather than an unconditional one.
TEST_F(UnderlyingKeyValueStoreTest, ConditionalWriteDespiteMaxChunks) {
  grid_shape = {2};
  store = GetStore(grid_shape);
  auto future = store->Write(EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}),
                             {StorageGeneration::NoValue()});
  {
    // The condition forces a read of the existing shard state.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    req.promise.SetResult(ReadResult{
        ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
  }
  {
    // The underlying write must carry the NoValue condition.
    auto req = mock_store->write_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->write_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(StorageGeneration::NoValue(),
              req.options.generation_conditions.if_equal);
  }
}
// An underlying-store error on the shard write is propagated with the
// "Error writing" context attached.
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShardError) {
  auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
  future.Force();
  {
    // Initial read: shard missing.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    req.promise.SetResult(ReadResult{
        ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
  }
  {
    // Fail the shard write.
    auto req = mock_store->write_requests.pop_nonblock().value();
    req.promise.SetResult(absl::UnknownError("Write error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), MatchesStatus(absl::StatusCode::kUnknown,
                                             "Error writing \"shard_path\": "
                                             "Write error"));
}
// Writing one entry into a shard that already holds another entry: the driver
// reads the existing shard, merges the new entry, and writes the combined
// shard conditioned on the generation it read ("g0").
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShard) {
  grid_shape = {2};
  store = GetStore(grid_shape);
  auto future = store->Write(EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}));
  ASSERT_FALSE(future.ready()) << future.status();
  {
    // Answer the initial full-shard read with an existing shard containing
    // only entry 1 (value {4,5,6} at offset 0, length 3).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    auto content = Bytes({
        4, 5, 6,
    });
    content.Append(WithCrc32c(Bytes({
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
    })));
    req.promise.SetResult(
        ReadResult{ReadResult::kValue,
                   content,
                   {StorageGeneration::FromString("g0"), absl::Now()}});
  }
  ASSERT_FALSE(future.ready()) << future.status();
  absl::Time write_time;
  {
    auto req = mock_store->write_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->write_requests.size());
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    // Rewrite is conditioned on the shard still being at "g0".
    EXPECT_EQ(StorageGeneration::FromString("g0"),
              req.options.generation_conditions.if_equal);
    // Merged shard: entry 0 then existing entry 1, plus updated index.
    auto content = Bytes({
        1, 2, 3,
        4, 5, 6,
    });
    content.Append(WithCrc32c(Bytes({
        0, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
    })));
    EXPECT_THAT(req.value, content);
    write_time = absl::Now();
    req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
                          write_time);
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::FromString("g1"), write_time));
}
// An underlying-store error while reading the existing shard during a write
// is propagated with the "Error reading" context attached.
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShardReadError) {
  auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
  {
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), MatchesStatus(absl::StatusCode::kUnknown,
                                             "Error reading \"shard_path\": "
                                             "Read error"));
}
// Deleting the full key range is implemented as a single unconditional
// delete (write with no value) of the whole shard, with no prior read.
TEST_F(UnderlyingKeyValueStoreTest, DeleteRangeWhenEmpty) {
  grid_shape = {2};
  store = GetStore(grid_shape);
  auto future = store->DeleteRange({});
  future.Force();
  {
    auto req = mock_store->write_requests.pop();
    ASSERT_EQ(0, mock_store->write_requests.size());
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("shard_path", req.key);
    // Unconditional, and value==nullopt means "delete the shard".
    EXPECT_TRUE(StorageGeneration::IsUnknown(
        req.options.generation_conditions.if_equal));
    EXPECT_EQ(std::nullopt, req.value);
    req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
                          absl::Now());
  }
  ASSERT_TRUE(future.ready());
  TENSORSTORE_ASSERT_OK(future);
}
// Verifies batching of reads against the underlying store by counting logged
// requests: un-batched index+data costs 2 requests, a fully batched set of
// reads coalesces to 1, and mixed read conditions split the batch into 2.
TEST_F(UnderlyingKeyValueStoreTest, BatchRead) {
  // Disable caching so every read hits the underlying store, and let the
  // mock forward to (and log against) a real memory store.
  cache_pool = CachePool::Make({});
  auto memory_store = tensorstore::GetMemoryKeyValueStore();
  mock_store->forward_to = memory_store;
  mock_store->log_requests = true;
  mock_store->handle_batch_requests = true;
  grid_shape = {3};
  store = GetStore(grid_shape);
  TENSORSTORE_ASSERT_OK(
      store->Write(EntryIdToKey(0, grid_shape), absl::Cord("abc")).result());
  TENSORSTORE_ASSERT_OK(
      store->Write(EntryIdToKey(1, grid_shape), absl::Cord("def")).result());
  mock_store->request_log.pop_all();  // Discard the write-path requests.
  {
    // Two batched reads: one request for the index, one for the data.
    std::vector<Future<kvstore::ReadResult>> futures;
    {
      kvstore::ReadOptions options;
      options.batch = Batch::New();
      futures = {
          store->Read(EntryIdToKey(0, grid_shape), options),
          store->Read(EntryIdToKey(1, grid_shape), options),
      };
    }
    EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
    EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
    EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(2));
  }
  {
    // Batch covering all three entries (including a missing one): coalesces
    // to a single underlying request.
    std::vector<Future<kvstore::ReadResult>> futures;
    {
      kvstore::ReadOptions options;
      options.batch = Batch::New();
      futures = {
          store->Read(EntryIdToKey(0, grid_shape), options),
          store->Read(EntryIdToKey(1, grid_shape), options),
          store->Read(EntryIdToKey(2, grid_shape), options),
      };
    }
    EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
    EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
    EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
    EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(1));
  }
  {
    // Same batch but one read carries a different if_not_equal condition,
    // which cannot be coalesced with the others: 2 underlying requests.
    std::vector<Future<kvstore::ReadResult>> futures;
    {
      kvstore::ReadOptions options1;
      options1.batch = Batch::New();
      kvstore::ReadOptions options2;
      options2.batch = options1.batch;
      options2.generation_conditions.if_not_equal =
          StorageGeneration::Invalid();
      futures = {
          store->Read(EntryIdToKey(0, grid_shape), options1),
          store->Read(EntryIdToKey(1, grid_shape), options1),
          store->Read(EntryIdToKey(2, grid_shape), options2),
      };
    }
    EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
    EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
    EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
    EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(2));
  }
}
// Fixture for read-modify-write tests: wraps the sharded store (backed by a
// MockKeyValueStore) in a KvsBackedTestCache so transactional modify/read
// sequences can be exercised while the mock requests are forwarded to an
// in-memory store.
class ReadModifyWriteTest : public ::testing::Test {
 protected:
  MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
  tensorstore::kvstore::DriverPtr memory_store =
      tensorstore::GetMemoryKeyValueStore();
  // Builds a sharded store over `mock_store` with `num_entries` entries.
  kvstore::DriverPtr GetStore(int64_t num_entries = 100) {
    return GetDefaultStore(mock_store, "shard_path",
                           tensorstore::InlineExecutor{},
                           CachePool::Make(CachePool::Limits{}), {num_entries});
  }
  // Creates a KvsBackedTestCache over `store` (a fresh default store if none
  // is given), with its own cache pool.
  auto GetKvsBackedCache(kvstore::DriverPtr store = {}) {
    if (!store) store = GetStore();
    return GetCache<KvsBackedTestCache>(
        CachePool::Make(CachePool::Limits{}).get(), "",
        [&] { return std::make_unique<KvsBackedTestCache>(store); });
  }
};
// Two independent caches over the same store append to the same entry within
// one transaction; a transactional read observes both modifications composed
// ("abc" + "def"), and the commit issues a single underlying write.
TEST_F(ReadModifyWriteTest, MultipleCaches) {
  std::vector<Index> grid_shape{100};
  auto cache1 = GetKvsBackedCache();
  auto cache2 = GetKvsBackedCache();
  auto transaction = Transaction(tensorstore::isolated);
  {
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto open_transaction,
        tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "abc"));
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "def"));
    auto read_future = GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
                           ->ReadValue(open_transaction);
    // Forward the two underlying reads (one per cache) to the memory store.
    mock_store->read_requests.pop()(memory_store);
    mock_store->read_requests.pop()(memory_store);
    EXPECT_THAT(read_future.result(),
                ::testing::Optional(absl::Cord("abcdef")));
  }
  transaction.CommitAsync().IgnoreFuture();
  // Commit performs a single shard write.
  auto write_req = mock_store->write_requests.pop();
  write_req(memory_store);
  TENSORSTORE_EXPECT_OK(transaction.future());
}
// Like MultipleCaches, but with a Barrier() splitting the transaction into
// two phases.  The transactional read composes all four appends
// ("abcdefghijkl"), and the commit performs one write per phase (with
// intervening reads of the shard state between phases).
TEST_F(ReadModifyWriteTest, MultiplePhasesMultipleCaches) {
  std::vector<Index> grid_shape{100};
  auto cache1 = GetKvsBackedCache();
  auto cache2 = GetKvsBackedCache();
  auto transaction = Transaction(tensorstore::isolated);
  {
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto open_transaction,
        tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
    // Phase 1: "abc" + "def".
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "abc"));
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "def"));
    open_transaction->Barrier();
    // Phase 2: "ghi" + "jkl".
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "ghi"));
    TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
                              ->Modify(open_transaction, false, "jkl"));
    auto read_future = GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
                           ->ReadValue(open_transaction);
    mock_store->read_requests.pop()(memory_store);
    mock_store->read_requests.pop()(memory_store);
    EXPECT_THAT(read_future.result(),
                ::testing::Optional(absl::Cord("abcdefghijkl")));
  }
  transaction.CommitAsync().IgnoreFuture();
  // Commit sequence: phase-1 write, re-reads of the updated shard, phase-2
  // write.  Forward each request to the memory store in order.
  mock_store->write_requests.pop()(memory_store);
  mock_store->read_requests.pop()(memory_store);
  mock_store->read_requests.pop()(memory_store);
  mock_store->read_requests.pop()(memory_store);
  mock_store->write_requests.pop()(memory_store);
  TENSORSTORE_EXPECT_OK(transaction.future());
}
// Registers the generic kvs-backed-cache transactional test suite against the
// sharded driver, once with a non-atomic and once with an atomic underlying
// memory store.
TENSORSTORE_GLOBAL_INITIALIZER {
  using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
  using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
  for (bool underlying_atomic : {false, true}) {
    KvsBackedCacheBasicTransactionalTestOptions options;
    const int64_t num_entries = 100;
    options.test_name = tensorstore::StrCat(
        "ZarrShardingIndexed/underlying_atomic=", underlying_atomic);
    options.get_store = [=] {
      return GetDefaultStore(
          tensorstore::GetMemoryKeyValueStore(underlying_atomic),
          "shard_path", tensorstore::InlineExecutor{},
          CachePool::Make(CachePool::Limits{}), {num_entries});
    };
    options.delete_range_supported = true;
    options.multi_key_atomic_supported = true;
    // Key getter maps the generic test's keys onto valid entry-id keys.
    options.get_key_getter = [=] {
      return [getter = std::make_shared<GetKey>(
                  true, std::vector<Index>{num_entries})](
                 auto key) { return (*getter)(key); };
    };
    RegisterKvsBackedCacheBasicTransactionalTest(options);
  }
}
// Verifies that a zarr3_sharding_indexed kvstore spec survives a round trip
// through its JSON representation, backed by the in-memory driver.
TEST(ShardedKeyValueStoreTest, SpecRoundtrip) {
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  // Keys are fixed-width binary entry ids; 8 NUL bytes addresses entry 0.
  options.roundtrip_key = std::string(8, '\0');
  options.full_base_spec = {{"driver", "memory"}, {"path", "shard_path"}};
  options.full_spec = {
      {"driver", "zarr3_sharding_indexed"},
      {"base", options.full_base_spec},
      {"grid_shape", {100, 200}},
      {"index_location", "end"},
      {"index_codecs",
       {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
  };
  // NOTE(review): presumably disabled because the in-memory base store does
  // not persist data across spec serialization -- confirm.
  options.check_data_after_serialization = false;
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
// Same round-trip check as SpecRoundtrip, but backed by the "file" driver in
// a temporary directory; here the post-serialization data check is left at
// its default.
TEST(ShardedKeyValueStoreTest, SpecRoundtripFile) {
  tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
  tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
  options.roundtrip_key = std::string(8, '\0');
  options.full_base_spec = {{"driver", "file"},
                            {"path", tempdir.path() + "/shard_path"}};
  options.full_spec = {
      {"driver", "zarr3_sharding_indexed"},
      {"base", options.full_base_spec},
      {"grid_shape", {100, 200}},
      {"index_location", "end"},
      {"index_codecs",
       {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
  };
  tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
// Verifies that `base()` on both the spec and the opened store returns the
// underlying memory kvstore, including when a transaction is bound.
TEST(ShardedKeyValueStoreTest, Base) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto spec,
      kvstore::Spec::FromJson(
          {{"driver", "zarr3_sharding_indexed"},
           // NOTE(review): this string literal appears truncated ("memory:");
           // the original likely read "memory://..." and the tail was lost to
           // comment stripping -- restore from upstream before building.
           {"base", "memory:
           {"grid_shape", {100, 200}},
           {"index_location", "end"},
           {"index_codecs",
            {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
           {"path", "1"}}));
  // NOTE(review): the "memory:" literal below is truncated the same way.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
                                   kvstore::Spec::FromJson("memory:
  EXPECT_THAT(spec.base(), ::testing::Optional(base_spec));
  auto context = tensorstore::Context::Default();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
                                   kvstore::Open(spec, context).result());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
                                   kvstore::Open(base_spec, context).result());
  EXPECT_THAT(store.base(), ::testing::Optional(base_store));
  auto transaction = tensorstore::Transaction(tensorstore::atomic_isolated);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_with_txn, store | transaction);
  EXPECT_THAT(store_with_txn.base(), base_store | transaction);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
cd683c63-0679-46b1-95e4-b2624b9ce00a | cpp | google/tensorstore | uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <algorithm>
#include "absl/base/optimization.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
namespace jb = tensorstore::internal_json_binding;
// JSON binder mapping ShardingSpec::HashFunction to/from the canonical
// strings "identity" and "murmurhash3_x86_128".
constexpr auto HashFunctionBinder = [](auto is_loading, const auto& options,
                                       auto* obj, auto* j) {
  using HashFunction = ShardingSpec::HashFunction;
  return jb::Enum<HashFunction, const char*>({
      {HashFunction::identity, "identity"},
      {HashFunction::murmurhash3_x86_128, "murmurhash3_x86_128"},
  })(is_loading, options, obj, j);
};
// Like DataEncodingJsonBinder, but when the JSON member is absent the value
// defaults to DataEncoding::raw (and the default is still emitted on save,
// per kAlwaysIncludeDefaults).
constexpr auto DefaultableDataEncodingJsonBinder =
    [](auto is_loading, const auto& options, auto* obj, auto* j) {
      using DataEncoding = ShardingSpec::DataEncoding;
      return jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
          [](auto* v) { *v = DataEncoding::raw; }, DataEncodingJsonBinder)(
          is_loading, options, obj, j);
    };
}
// JSON binder mapping ShardingSpec::DataEncoding to/from the canonical
// strings "raw" and "gzip".
TENSORSTORE_DEFINE_JSON_BINDER(
    DataEncodingJsonBinder, jb::Enum<ShardingSpec::DataEncoding, const char*>({
                                {ShardingSpec::DataEncoding::raw, "raw"},
                                {ShardingSpec::DataEncoding::gzip, "gzip"},
                            }))
// Streams the hash function as its canonical JSON string ("identity" or
// "murmurhash3_x86_128").
std::ostream& operator<<(std::ostream& os, ShardingSpec::HashFunction x) {
  return os << jb::ToJson(x, HashFunctionBinder).value();
}
// ADL hook used by ::nlohmann::json to serialize a hash function.
void to_json(::nlohmann::json& out,
             ShardingSpec::HashFunction x) {
  out = jb::ToJson(x, HashFunctionBinder).value();
}
// Streams the data encoding as its canonical JSON string ("raw" or "gzip").
std::ostream& operator<<(std::ostream& os, ShardingSpec::DataEncoding x) {
  return os << jb::ToJson(x, DataEncodingJsonBinder).value();
}
// Streams the full sharding spec as its JSON representation.
std::ostream& operator<<(std::ostream& os, const ShardingSpec& x) {
  return os << jb::ToJson(x).value();
}
// Default JSON binder for ShardingSpec.  Requires the fixed
// "@type": "neuroglancer_uint64_sharded_v1" marker; validates bit-count
// ranges (preshift_bits in [0,64], minishard_bits in [0,32], and shard_bits
// constrained so that minishard_bits + shard_bits <= 64); both encoding
// members default to "raw" when absent.
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ShardingSpec, [](auto is_loading,
                                                        const auto& options,
                                                        auto* obj, auto* j) {
  return jb::Object(
      jb::Member("@type",
                 jb::Constant([] { return "neuroglancer_uint64_sharded_v1"; })),
      jb::Member("preshift_bits", jb::Projection(&ShardingSpec::preshift_bits,
                                                 jb::Integer<int>(0, 64))),
      jb::Member("minishard_bits", jb::Projection(&ShardingSpec::minishard_bits,
                                                  jb::Integer<int>(0, 32))),
      // shard_bits' upper bound depends on the already-parsed minishard_bits,
      // hence the jb::Dependent wrapper.
      jb::Member("shard_bits",
                 jb::Dependent([](auto is_loading, const auto& options,
                                  auto* obj, auto* j) {
                   return jb::Projection(
                       &ShardingSpec::shard_bits,
                       jb::Integer<int>(0, 64 - obj->minishard_bits));
                 })),
      jb::Member("hash", jb::Projection(&ShardingSpec::hash_function,
                                        HashFunctionBinder)),
      jb::Member("data_encoding",
                 jb::Projection(&ShardingSpec::data_encoding,
                                DefaultableDataEncodingJsonBinder)),
      jb::Member("minishard_index_encoding",
                 jb::Projection(&ShardingSpec::minishard_index_encoding,
                                DefaultableDataEncodingJsonBinder)))(
      is_loading, options, obj, j);
})
// Two sharding specs are equal iff every configuration field matches.
bool operator==(const ShardingSpec& a, const ShardingSpec& b) {
  if (a.hash_function != b.hash_function) return false;
  if (a.preshift_bits != b.preshift_bits) return false;
  if (a.minishard_bits != b.minishard_bits) return false;
  if (a.shard_bits != b.shard_bits) return false;
  if (a.data_encoding != b.data_encoding) return false;
  return a.minishard_index_encoding == b.minishard_index_encoding;
}
// Returns the storage key for shard `shard_number`: "<prefix>/<hex>.shard",
// where <hex> is the shard number in lowercase hexadecimal, zero-padded to
// ceil(shard_bits / 4) digits (the "*" width argument to %0*x).
std::string GetShardKey(const ShardingSpec& sharding_spec,
                        std::string_view prefix, uint64_t shard_number) {
  return internal::JoinPath(
      prefix,
      absl::StrFormat("%0*x.shard", CeilOfRatio(sharding_spec.shard_bits, 4),
                      shard_number));
}
namespace {
// Shifts `x` right by `amount`, where `amount` may be as large as 64.  A
// plain `>>` by the full bit width is undefined behavior, so that case is
// handled explicitly and yields 0.
constexpr uint64_t ShiftRightUpTo64(uint64_t x, int amount) {
  return (amount == 64) ? uint64_t{0} : (x >> amount);
}
// Returns a mask with the low `num_bits` bits set.  The num_bits == 64 case
// is special-cased because shifting a 64-bit value by 64 is undefined
// behavior.
uint64_t GetLowBitMask(int num_bits) {
  return (num_bits == 64) ? ~uint64_t{0} : ((uint64_t{1} << num_bits) - 1);
}
}
// Applies hash function `h` to the (already pre-shifted) chunk key.
//
// For murmurhash3_x86_128, only the low 64 bits of the 128-bit digest are
// used: words 0 and 1 of the output, combined little-endian style.
uint64_t HashChunkId(ShardingSpec::HashFunction h, uint64_t key) {
  switch (h) {
    case ShardingSpec::HashFunction::identity:
      return key;
    case ShardingSpec::HashFunction::murmurhash3_x86_128: {
      // Value-initialize all four 32-bit output words.  (The previous
      // `{0, 0, 0}` initializer listed only three elements of the 4-element
      // array -- legal, since the remainder is value-initialized, but
      // misleading.)
      uint32_t out[4] = {};
      MurmurHash3_x86_128Hash64Bits(key, out);
      return (static_cast<uint64_t>(out[1]) << 32) | out[0];
    }
  }
  ABSL_UNREACHABLE();
}
// Computes the combined shard+minishard number for `chunk_id`:
//   1. shift the chunk id right by `preshift_bits`;
//   2. hash the result with the configured hash function;
//   3. keep the low (minishard_bits + shard_bits) bits of the hash.
ChunkCombinedShardInfo GetChunkShardInfo(const ShardingSpec& sharding_spec,
                                         ChunkId chunk_id) {
  ChunkCombinedShardInfo result;
  const uint64_t hash_input =
      ShiftRightUpTo64(chunk_id.value, sharding_spec.preshift_bits);
  const uint64_t hash_output =
      HashChunkId(sharding_spec.hash_function, hash_input);
  result.shard_and_minishard =
      hash_output &
      GetLowBitMask(sharding_spec.minishard_bits + sharding_spec.shard_bits);
  return result;
}
// Splits a combined shard number into its components: the low
// `minishard_bits` bits are the minishard, the next `shard_bits` bits are
// the shard.
ChunkSplitShardInfo GetSplitShardInfo(const ShardingSpec& sharding_spec,
                                      ChunkCombinedShardInfo combined_info) {
  ChunkSplitShardInfo result;
  result.minishard = combined_info.shard_and_minishard &
                     GetLowBitMask(sharding_spec.minishard_bits);
  result.shard = ShiftRightUpTo64(combined_info.shard_and_minishard,
                                  sharding_spec.minishard_bits) &
                 GetLowBitMask(sharding_spec.shard_bits);
  return result;
}
// Inverse of GetSplitShardInfo: packs shard/minishard back into the combined
// representation (shard bits above the low `minishard_bits` bits).
ChunkCombinedShardInfo GetCombinedShardInfo(const ShardingSpec& sharding_spec,
                                            ChunkSplitShardInfo split_info) {
  ChunkCombinedShardInfo combined;
  combined.shard_and_minishard = split_info.minishard;
  // When minishard_bits == 64 the minishard occupies the whole value and a
  // left shift by 64 would be undefined behavior, so skip the shard bits.
  if (sharding_spec.minishard_bits != 64) {
    combined.shard_and_minishard |=
        (split_info.shard << sharding_spec.minishard_bits);
  }
  return combined;
}
// Returns the size in bytes of the shard index: one 16-byte entry (a pair of
// 64-bit offsets) per minishard, i.e. 16 * 2^minishard_bits.
int64_t ShardIndexSize(const ShardingSpec& sharding_spec) {
  return static_cast<int64_t>(16) << sharding_spec.minishard_bits;
}
// Converts `relative_range`, expressed relative to the end of the shard
// index, into an absolute byte range within the shard file by adding the
// shard index size.  Fails (rather than wrapping) on int64 overflow.
Result<ByteRange> GetAbsoluteShardByteRange(ByteRange relative_range,
                                            const ShardingSpec& sharding_spec) {
  const int64_t offset = ShardIndexSize(sharding_spec);
  ByteRange result;
  if (internal::AddOverflow(relative_range.inclusive_min, offset,
                            &result.inclusive_min) ||
      internal::AddOverflow(relative_range.exclusive_max, offset,
                            &result.exclusive_max)) {
    return absl::FailedPreconditionError(tensorstore::StrCat(
        "Byte range ", relative_range,
        " relative to the end of the shard index (", offset, ") is not valid"));
  }
  return result;
}
// Binary-searches `chunks` (which must be sorted by minishard_and_chunk_id)
// for the chunk with the given id.
//
// \returns A pointer into `chunks`, or nullptr if not present.
const EncodedChunk* FindChunk(span<const EncodedChunk> chunks,
                              MinishardAndChunkId minishard_and_chunk_id) {
  const auto chunk_it = std::lower_bound(
      chunks.begin(), chunks.end(), minishard_and_chunk_id,
      [](const auto& chunk, const auto& minishard_and_chunk_id) {
        return chunk.minishard_and_chunk_id < minishard_and_chunk_id;
      });
  if (chunk_it == chunks.end() ||
      chunk_it->minishard_and_chunk_id != minishard_and_chunk_id) {
    return nullptr;
  }
  return &*chunk_it;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
// Each of b..g differs from baseline `a` in exactly one field, so together
// they cover every field participating in operator== / operator!=.
TEST(ShardingSpecTest, Comparison) {
  ShardingSpec a{
      ShardingSpec::HashFunction::identity,
      1,
      2,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  // b: different hash function.
  ShardingSpec b{
      ShardingSpec::HashFunction::murmurhash3_x86_128,
      1,
      2,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  // c: different preshift_bits.
  ShardingSpec c{
      ShardingSpec::HashFunction::identity,
      2,
      2,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  // d: different minishard_bits.
  ShardingSpec d{
      ShardingSpec::HashFunction::identity,
      1,
      5,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  // e: different shard_bits.
  ShardingSpec e{
      ShardingSpec::HashFunction::identity,
      1,
      2,
      9,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  // f: different data_encoding.
  ShardingSpec f{
      ShardingSpec::HashFunction::identity,
      1,
      2,
      3,
      ShardingSpec::DataEncoding::gzip,
      ShardingSpec::DataEncoding::gzip,
  };
  // g: different minishard_index_encoding.
  ShardingSpec g{
      ShardingSpec::HashFunction::identity,
      1,
      2,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::raw,
  };
  EXPECT_EQ(a, a);
  EXPECT_EQ(b, b);
  EXPECT_EQ(c, c);
  EXPECT_EQ(d, d);
  EXPECT_EQ(e, e);
  EXPECT_EQ(f, f);
  EXPECT_EQ(g, g);
  EXPECT_NE(a, b);
  EXPECT_NE(a, c);
  EXPECT_NE(a, d);
  EXPECT_NE(a, e);
  EXPECT_NE(a, f);
  EXPECT_NE(a, g);
}
// Verifies the JSON serialization of a fully-specified ShardingSpec,
// including the fixed "@type" marker.
TEST(ShardingSpecTest, ToJson) {
  ShardingSpec a{
      ShardingSpec::HashFunction::identity,
      1,
      2,
      3,
      ShardingSpec::DataEncoding::raw,
      ShardingSpec::DataEncoding::gzip,
  };
  EXPECT_EQ(::nlohmann::json({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "identity"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"data_encoding", "raw"},
                              {"minishard_index_encoding", "gzip"}}),
            ::nlohmann::json(a));
}
// Exercises ShardingSpec::FromJson: valid parses for both hash functions,
// encoding defaults, missing/null required members, bad "@type"/hash/
// encoding values, and the bit-count range limits enforced by the binder.
TEST(ShardingSpecTest, Parse) {
  // Fully-specified spec parses for either hash function.
  for (auto h : {ShardingSpec::HashFunction::identity,
                 ShardingSpec::HashFunction::murmurhash3_x86_128}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", ::nlohmann::json(h)},
                                {"preshift_bits", 1},
                                {"minishard_bits", 2},
                                {"shard_bits", 3},
                                {"data_encoding", "raw"},
                                {"minishard_index_encoding", "gzip"}}),
        ::testing::Optional(ShardingSpec{
            h,
            1,
            2,
            3,
            ShardingSpec::DataEncoding::raw,
            ShardingSpec::DataEncoding::gzip,
        }));
  }
  // Omitted data_encoding defaults to raw.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "murmurhash3_x86_128"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"minishard_index_encoding", "gzip"}}),
      ::testing::Optional(ShardingSpec{
          ShardingSpec::HashFunction::murmurhash3_x86_128,
          1,
          2,
          3,
          ShardingSpec::DataEncoding::raw,
          ShardingSpec::DataEncoding::gzip,
      }));
  // Omitted minishard_index_encoding defaults to raw.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "murmurhash3_x86_128"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"data_encoding", "gzip"}}),
      ::testing::Optional(ShardingSpec{
          ShardingSpec::HashFunction::murmurhash3_x86_128,
          1,
          2,
          3,
          ShardingSpec::DataEncoding::gzip,
          ShardingSpec::DataEncoding::raw,
      }));
  // Each required member: both missing and null must be rejected.
  for (const char* k :
       {"@type", "hash", "preshift_bits", "minishard_bits", "shard_bits"}) {
    ::nlohmann::json j{{"@type", "neuroglancer_uint64_sharded_v1"},
                       {"hash", "murmurhash3_x86_128"},
                       {"preshift_bits", 1},
                       {"minishard_bits", 2},
                       {"shard_bits", 3},
                       {"minishard_index_encoding", "raw"},
                       {"data_encoding", "gzip"}};
    j.erase(k);
    EXPECT_THAT(ShardingSpec::FromJson(j),
                MatchesStatus(absl::StatusCode::kInvalidArgument));
    j[k] = nullptr;
    EXPECT_THAT(ShardingSpec::FromJson(j),
                MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
  // Wrong "@type" value.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v2"},
                              {"hash", "murmurhash3_x86_128"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"minishard_index_encoding", "raw"},
                              {"data_encoding", "gzip"}}),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    ".*\"neuroglancer_uint64_sharded_v2\".*"));
  // Unknown hash function name.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "invalid_hash"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"minishard_index_encoding", "raw"},
                              {"data_encoding", "gzip"}}),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    ".*\"invalid_hash\".*"));
  // data_encoding must be a string.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "identity"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"minishard_index_encoding", "raw"},
                              {"data_encoding", 1234}}),
      MatchesStatus(absl::StatusCode::kInvalidArgument, ".*1234.*"));
  // Unknown data_encoding name.
  EXPECT_THAT(
      ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                              {"hash", "identity"},
                              {"preshift_bits", 1},
                              {"minishard_bits", 2},
                              {"shard_bits", 3},
                              {"minishard_index_encoding", "raw"},
                              {"data_encoding", "invalid_encoding"}}),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    ".*\"invalid_encoding\".*"));
  // preshift_bits must lie in [0, 64].
  for (int i : {0, 1, 63, 64}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", i},
                                {"minishard_bits", 2},
                                {"shard_bits", 3}}),
        ::testing::Optional(ShardingSpec{
            ShardingSpec::HashFunction::identity,
            i,
            2,
            3,
            ShardingSpec::DataEncoding::raw,
            ShardingSpec::DataEncoding::raw,
        }));
  }
  for (int i : {-1, -2, 65, 66}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", i},
                                {"minishard_bits", 2},
                                {"shard_bits", 3}}),
        MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
  // minishard_bits must lie in [0, 32].
  for (int i : {0, 1, 31, 32}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", 1},
                                {"minishard_bits", i},
                                {"shard_bits", 0}}),
        ::testing::Optional(ShardingSpec{
            ShardingSpec::HashFunction::identity,
            1,
            i,
            0,
            ShardingSpec::DataEncoding::raw,
            ShardingSpec::DataEncoding::raw,
        }));
  }
  for (int i : {-1, -2, 33, 34, 35}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", 1},
                                {"minishard_bits", i},
                                {"shard_bits", 0}}),
        MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
  // shard_bits must lie in [0, 64 - minishard_bits] (here minishard_bits=7).
  for (int i : {0, 1, 64 - 8, 64 - 7}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", 1},
                                {"minishard_bits", 7},
                                {"shard_bits", i}}),
        ::testing::Optional(ShardingSpec{
            ShardingSpec::HashFunction::identity,
            1,
            7,
            i,
            ShardingSpec::DataEncoding::raw,
            ShardingSpec::DataEncoding::raw,
        }));
  }
  for (int i : {-1, -2, 64 - 6, 64 - 5, 65, 66}) {
    EXPECT_THAT(
        ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
                                {"hash", "identity"},
                                {"preshift_bits", 1},
                                {"minishard_bits", 7},
                                {"shard_bits", i}}),
        MatchesStatus(absl::StatusCode::kInvalidArgument));
  }
  // Non-object input is rejected.
  EXPECT_THAT(ShardingSpec::FromJson("invalid"),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Covers equality/inequality of MinishardIndexEntry across differing chunk
// ids and byte ranges.
TEST(MinishardIndexEntryTest, Comparison) {
  MinishardIndexEntry a{{1}, {2, 3}};
  MinishardIndexEntry b{{1}, {3, 4}};
  MinishardIndexEntry c{{2}, {2, 3}};
  MinishardIndexEntry d{{2}, {3, 4}};
  EXPECT_EQ(a, a);
  EXPECT_EQ(b, b);
  EXPECT_EQ(c, c);
  EXPECT_EQ(d, d);
  EXPECT_FALSE(a != a);
  EXPECT_FALSE(a == b);
  EXPECT_NE(a, c);
  EXPECT_NE(a, d);
  EXPECT_NE(b, c);
  EXPECT_NE(b, d);
  EXPECT_NE(c, d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6ac94486-7e5d-4dfb-9030-e1765d91f8e5 | cpp | google/tensorstore | uint64_sharded_encoder | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/internal/endian.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
// Encodes a minishard index as three parallel arrays of n little-endian
// uint64 values (24 bytes per entry):
//   [0, 8n)    chunk id deltas (difference from the previous entry's id),
//   [8n, 16n)  start offset of each chunk relative to the previous chunk's
//              end (relative to 0 for the first entry),
//   [16n, 24n) chunk length in bytes.
absl::Cord EncodeMinishardIndex(
    span<const MinishardIndexEntry> minishard_index) {
  internal::FlatCordBuilder builder(minishard_index.size() * 24);
  ChunkId prev_chunk_id{0};
  int64_t prev_offset = 0;
  for (ptrdiff_t i = 0; i < minishard_index.size(); ++i) {
    const auto& e = minishard_index[i];
    absl::little_endian::Store64(builder.data() + i * 8,
                                 e.chunk_id.value - prev_chunk_id.value);
    absl::little_endian::Store64(
        builder.data() + minishard_index.size() * 8 + i * 8,
        e.byte_range.inclusive_min - prev_offset);
    absl::little_endian::Store64(
        builder.data() + minishard_index.size() * 16 + i * 8,
        e.byte_range.exclusive_max - e.byte_range.inclusive_min);
    prev_chunk_id = e.chunk_id;
    prev_offset = e.byte_range.exclusive_max;
  }
  return std::move(builder).Build();
}
// Encodes the shard index as one 16-byte entry per minishard: the
// (inclusive_min, exclusive_max) offset pair as two little-endian uint64s.
absl::Cord EncodeShardIndex(span<const ShardIndexEntry> shard_index) {
  internal::FlatCordBuilder builder(shard_index.size() * 16);
  for (ptrdiff_t i = 0; i < shard_index.size(); ++i) {
    const auto& e = shard_index[i];
    absl::little_endian::Store64(builder.data() + i * 16, e.inclusive_min);
    absl::little_endian::Store64(builder.data() + i * 16 + 8, e.exclusive_max);
  }
  return std::move(builder).Build();
}
// Constructs an encoder that emits encoded bytes through `write_function`.
// The shard index is sized to one entry per minishard (2^minishard_bits).
ShardEncoder::ShardEncoder(const ShardingSpec& sharding_spec,
                           WriteFunction write_function)
    : sharding_spec_(sharding_spec),
      write_function_(std::move(write_function)),
      shard_index_(static_cast<size_t>(1) << sharding_spec_.minishard_bits),
      cur_minishard_(0),
      data_file_offset_(0) {}
// Convenience constructor: appends all encoded output to `out`.  `out` must
// outlive the encoder.
ShardEncoder::ShardEncoder(const ShardingSpec& sharding_spec, absl::Cord& out)
    : ShardEncoder(sharding_spec, [&out](const absl::Cord& buffer) {
        out.Append(buffer);
        return absl::OkStatus();
      }) {}
namespace {
// Encodes `input` per `encoding` and passes the result to `write_function`.
//
// \returns The encoded size in bytes on success, otherwise the write error.
Result<int64_t> EncodeData(
    const absl::Cord& input, ShardingSpec::DataEncoding encoding,
    absl::FunctionRef<absl::Status(const absl::Cord& buffer)> write_function) {
  auto encoded = EncodeData(input, encoding);
  if (auto status = write_function(encoded); status.ok()) {
    return encoded.size();
  } else {
    return status;
  }
}
}
// Flushes the index of the current minishard: encodes it with
// `minishard_index_encoding`, appends it to the data stream, and records its
// byte range in the shard index.  No-op if no entries have been written for
// the current minishard.
absl::Status ShardEncoder::FinalizeMinishard() {
  if (minishard_index_.empty()) return absl::OkStatus();
  auto uncompressed_minishard_index = EncodeMinishardIndex(minishard_index_);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto num_bytes,
      EncodeData(uncompressed_minishard_index,
                 sharding_spec_.minishard_index_encoding, write_function_));
  shard_index_[cur_minishard_] = {data_file_offset_,
                                  data_file_offset_ + num_bytes};
  data_file_offset_ += num_bytes;
  minishard_index_.clear();
  return absl::OkStatus();
}
// Flushes any pending minishard index and returns the encoded shard index
// (which the caller typically prepends or appends to the written data).
Result<absl::Cord> ShardEncoder::Finalize() {
  TENSORSTORE_RETURN_IF_ERROR(FinalizeMinishard());
  return EncodeShardIndex(shard_index_);
}
// Writes `data` to the shard data stream as the next chunk of `minishard`,
// without recording it in the minishard index.
//
// Minishards must be written in non-decreasing order; switching to a new
// minishard first finalizes the index of the current one.
//
// \param minishard Minishard number within the shard.
// \param data Chunk payload.
// \param compress If true, apply `sharding_spec_.data_encoding`; otherwise
//     the data is written raw (for callers that have already encoded it).
// \returns The byte range of the encoded chunk within the data stream.
Result<ByteRange> ShardEncoder::WriteUnindexedEntry(std::uint64_t minishard,
                                                    const absl::Cord& data,
                                                    bool compress) {
  if (minishard != cur_minishard_) {
    if (minishard < cur_minishard_) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Minishard ", minishard,
                              " cannot be written after ", cur_minishard_));
    }
    TENSORSTORE_RETURN_IF_ERROR(FinalizeMinishard());
    cur_minishard_ = minishard;
  }
  // (Removed an unused local `std::string output;` left over from an earlier
  // implementation.)
  const auto start_offset = data_file_offset_;
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto num_bytes, EncodeData(data,
                                 compress ? sharding_spec_.data_encoding
                                          : ShardingSpec::DataEncoding::raw,
                                 write_function_));
  data_file_offset_ += num_bytes;
  return ByteRange{start_offset, data_file_offset_};
}
// Writes `data` as the chunk `chunk_id` of `minishard` and records it in the
// current minishard index.  Same ordering requirements as
// WriteUnindexedEntry.
absl::Status ShardEncoder::WriteIndexedEntry(uint64_t minishard,
                                             ChunkId chunk_id,
                                             const absl::Cord& data,
                                             bool compress) {
  TENSORSTORE_ASSIGN_OR_RETURN(auto byte_range,
                               WriteUnindexedEntry(minishard, data, compress));
  minishard_index_.push_back({chunk_id, byte_range});
  return absl::OkStatus();
}
ShardEncoder::~ShardEncoder() = default;
// Assembles a complete shard from already-encoded chunks: the encoded shard
// index followed by the data stream.  Chunks are written uncompressed
// (compress=false) since their `encoded_data` is already in final form.
//
// \returns std::nullopt if `chunks` produced no data (empty shard).
std::optional<absl::Cord> EncodeShard(const ShardingSpec& spec,
                                      span<const EncodedChunk> chunks) {
  absl::Cord shard_data;
  ShardEncoder encoder(spec, shard_data);
  for (const auto& chunk : chunks) {
    TENSORSTORE_CHECK_OK(
        encoder.WriteIndexedEntry(chunk.minishard_and_chunk_id.minishard,
                                  chunk.minishard_and_chunk_id.chunk_id,
                                  chunk.encoded_data, false));
  }
  auto shard_index = encoder.Finalize().value();
  if (shard_data.empty()) return std::nullopt;
  shard_index.Append(shard_data);
  return shard_index;
}
// Encodes `input` per `encoding`: raw is a pass-through; gzip compresses at
// zlib level 9 with a gzip header.
absl::Cord EncodeData(const absl::Cord& input,
                      ShardingSpec::DataEncoding encoding) {
  if (encoding == ShardingSpec::DataEncoding::raw) {
    return input;
  }
  absl::Cord compressed;
  zlib::Options options;
  options.level = 9;
  options.use_gzip_header = true;
  zlib::Encode(input, &compressed, options);
  return compressed;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace zlib = tensorstore::zlib;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeShardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardEncoder;
using ::tensorstore::neuroglancer_uint64_sharded::ShardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
// Test helper: builds an absl::Cord from a list of raw byte values.
absl::Cord Bytes(std::vector<unsigned char> bytes) {
  return absl::Cord(std::string_view(
      reinterpret_cast<const char*>(bytes.data()), bytes.size()));
}
// An empty minishard index encodes to zero bytes.
TEST(EncodeMinishardIndexTest, Empty) {
  auto out = EncodeMinishardIndex({});
  EXPECT_EQ("", out);
}
// A single entry encodes as three little-endian uint64s: chunk id delta
// (== the id itself for the first entry), start offset, and length.
TEST(EncodeMinishardIndexTest, SingleEntry) {
  auto out = EncodeMinishardIndex(
      std::vector<MinishardIndexEntry>{{{0x0123456789abcdef}, {0x11, 0x23}}});
  EXPECT_THAT(out, Bytes({
                       0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01,
                       0x11, 0,    0,    0,    0,    0,    0,    0,
                       0x12, 0,    0,    0,    0,    0,    0,    0,
                   }));
}
// With two entries the layout is: id deltas {1, 6}, then start-offset deltas
// {3, 2} (second start 12 minus previous end 10), then lengths {7, 3}.
TEST(EncodeMinishardIndexTest, MultipleEntries) {
  auto out = EncodeMinishardIndex(std::vector<MinishardIndexEntry>{
      {{1}, {3, 10}},
      {{7}, {12, 15}},
  });
  EXPECT_THAT(out, Bytes({
                       1, 0, 0, 0, 0, 0, 0, 0,
                       6, 0, 0, 0, 0, 0, 0, 0,
                       3, 0, 0, 0, 0, 0, 0, 0,
                       2, 0, 0, 0, 0, 0, 0, 0,
                       7, 0, 0, 0, 0, 0, 0, 0,
                       3, 0, 0, 0, 0, 0, 0, 0,
                   }));
}
// Shard index entries encode as adjacent (inclusive_min, exclusive_max)
// little-endian uint64 pairs.
TEST(EncodeShardIndexTest, Basic) {
  std::vector<ShardIndexEntry> shard_index{{1, 5}, {7, 10}};
  auto out = EncodeShardIndex(shard_index);
  EXPECT_THAT(out, Bytes({
                       1,  0, 0, 0, 0, 0, 0, 0,
                       5,  0, 0, 0, 0, 0, 0, 0,
                       7,  0, 0, 0, 0, 0, 0, 0,
                       10, 0, 0, 0, 0, 0, 0, 0,
                   }));
}
// End-to-end encoding with raw (uncompressed) data and minishard-index
// encodings: chunk payloads are emitted verbatim, each followed by that
// minishard's index, and Finalize() returns the shard index of byte ranges.
TEST(ShardEncoderTest, Raw) {
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 1},
      {"shard_bits", 0},
      {"data_encoding", "raw"},
      {"minishard_index_encoding", "raw"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  absl::Cord encoded_shard_data;
  ShardEncoder shard_encoder(sharding_spec, encoded_shard_data);
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {2},
                                                        Bytes({1, 2, 3, 4}),
                                                        false));
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {8},
                                                        Bytes({6, 7, 8}),
                                                        false));
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(1, {3}, Bytes({9, 10}),
                                                        false));
  auto encoded_shard_index = shard_encoder.Finalize().value();
  // Layout: minishard 0 chunk data, minishard 0 index (ids 2/+6, offsets,
  // lengths), minishard 1 chunk data, minishard 1 index.
  EXPECT_THAT(encoded_shard_data,
              Bytes({
                  1,  2, 3, 4,
                  6,  7, 8,
                  2,  0, 0, 0, 0, 0, 0, 0,
                  6,  0, 0, 0, 0, 0, 0, 0,
                  0,  0, 0, 0, 0, 0, 0, 0,
                  0,  0, 0, 0, 0, 0, 0, 0,
                  4,  0, 0, 0, 0, 0, 0, 0,
                  3,  0, 0, 0, 0, 0, 0, 0,
                  9,  10,
                  3,  0, 0, 0, 0, 0, 0, 0,
                  55, 0, 0, 0, 0, 0, 0, 0,
                  2,  0, 0, 0, 0, 0, 0, 0,
              }));
  // Shard index: byte ranges of the two minishard indices within the data
  // stream.
  EXPECT_THAT(encoded_shard_index,
              Bytes({
                  7,  0, 0, 0, 0, 0, 0, 0,
                  55, 0, 0, 0, 0, 0, 0, 0,
                  57, 0, 0, 0, 0, 0, 0, 0,
                  81, 0, 0, 0, 0, 0, 0, 0,
              }));
}
// End-to-end encoding with gzip data/minishard-index encodings.  The
// expected bytes are constructed independently with the same zlib options
// (level 9, gzip header) and compared against the encoder's output.  Note
// the third chunk is written with compress=false, so it stays raw.
TEST(ShardEncoderTest, Gzip) {
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 1},
      {"shard_bits", 0},
      {"data_encoding", "gzip"},
      {"minishard_index_encoding", "gzip"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  absl::Cord encoded_shard_data;
  ShardEncoder shard_encoder(sharding_spec, encoded_shard_data);
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {2},
                                                        Bytes({1, 2, 3, 4}),
                                                        true));
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {8},
                                                        Bytes({6, 7, 8}),
                                                        true));
  TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(1, {3}, Bytes({9, 10}),
                                                        false));
  absl::Cord encoded_shard_index = shard_encoder.Finalize().value();
  absl::Cord expected_shard_data;
  zlib::Options options{9, true};
  std::vector<ShardIndexEntry> shard_index(2);
  {
    // Minishard 0: two gzip-compressed chunks followed by its compressed
    // index.
    std::vector<MinishardIndexEntry> minishard_index(2);
    minishard_index[0].chunk_id = {2};
    minishard_index[0].byte_range.inclusive_min = expected_shard_data.size();
    zlib::Encode(Bytes({1, 2, 3, 4}), &expected_shard_data, options);
    minishard_index[0].byte_range.exclusive_max = expected_shard_data.size();
    minishard_index[1].chunk_id = {8};
    minishard_index[1].byte_range.inclusive_min = expected_shard_data.size();
    zlib::Encode(Bytes({6, 7, 8}), &expected_shard_data, options);
    minishard_index[1].byte_range.exclusive_max = expected_shard_data.size();
    shard_index[0].inclusive_min = expected_shard_data.size();
    zlib::Encode(EncodeMinishardIndex(minishard_index), &expected_shard_data,
                 options);
    shard_index[0].exclusive_max = expected_shard_data.size();
  }
  {
    // Minishard 1: one raw chunk followed by its compressed index.
    std::vector<MinishardIndexEntry> minishard_index(1);
    minishard_index[0].chunk_id = {3};
    minishard_index[0].byte_range.inclusive_min = expected_shard_data.size();
    expected_shard_data.Append(Bytes({9, 10}));
    minishard_index[0].byte_range.exclusive_max = expected_shard_data.size();
    shard_index[1].inclusive_min = expected_shard_data.size();
    zlib::Encode(EncodeMinishardIndex(minishard_index), &expected_shard_data,
                 options);
    shard_index[1].exclusive_max = expected_shard_data.size();
  }
  auto expected_shard_index = EncodeShardIndex(shard_index);
  EXPECT_EQ(expected_shard_data, encoded_shard_data);
  EXPECT_EQ(expected_shard_index, encoded_shard_index);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9973afe3-dec3-41d3-a3be-739f6757b498 | cpp | google/tensorstore | uint64_sharded_decoder | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
// Decodes a (possibly compressed) minishard index.
//
// Wire format (after decompression): three parallel arrays of n
// little-endian uint64 values (24 bytes per entry) -- delta-encoded chunk
// ids, start-offset deltas relative to the previous chunk's end, and chunk
// lengths.  The returned entries carry absolute (but still
// shard-index-relative) byte ranges and are sorted by chunk id.
//
// \error `absl::StatusCode::kInvalidArgument` if the decoded length is not a
//     multiple of 24 or a byte range is invalid.
Result<std::vector<MinishardIndexEntry>> DecodeMinishardIndex(
    const absl::Cord& input, ShardingSpec::DataEncoding encoding) {
  absl::Cord decoded_input;
  if (encoding != ShardingSpec::DataEncoding::raw) {
    TENSORSTORE_ASSIGN_OR_RETURN(decoded_input, DecodeData(input, encoding));
  } else {
    decoded_input = input;
  }
  if ((decoded_input.size() % 24) != 0) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid minishard index length: ", decoded_input.size()));
  }
  std::vector<MinishardIndexEntry> result(decoded_input.size() / 24);
  static_assert(sizeof(MinishardIndexEntry) == 24);
  auto decoded_flat = decoded_input.Flatten();
  ChunkId chunk_id{0};
  uint64_t byte_offset = 0;
  for (size_t i = 0; i < result.size(); ++i) {
    auto& entry = result[i];
    // Accumulate the deltas to recover absolute ids and offsets.
    chunk_id.value += absl::little_endian::Load64(decoded_flat.data() + i * 8);
    entry.chunk_id = chunk_id;
    byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
                                               8 * result.size());
    entry.byte_range.inclusive_min = byte_offset;
    byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
                                               16 * result.size());
    entry.byte_range.exclusive_max = byte_offset;
    if (!entry.byte_range.SatisfiesInvariants()) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Invalid byte range in minishard index for chunk ",
          entry.chunk_id.value, ": ", entry.byte_range));
    }
  }
  // Sort by chunk id so FindChunkInMinishard can binary search.
  absl::c_sort(result,
               [](const MinishardIndexEntry& a, const MinishardIndexEntry& b) {
                 return a.chunk_id.value < b.chunk_id.value;
               });
  return result;
}
// Binary-searches `minishard_index` (sorted by chunk id) for `chunk_id`.
//
// \returns The chunk's byte range, or std::nullopt if absent.
std::optional<ByteRange> FindChunkInMinishard(
    span<const MinishardIndexEntry> minishard_index, ChunkId chunk_id) {
  auto it =
      absl::c_lower_bound(minishard_index, chunk_id,
                          [](const MinishardIndexEntry& e, ChunkId chunk_id) {
                            return e.chunk_id.value < chunk_id.value;
                          });
  if (it == minishard_index.end() || it->chunk_id.value != chunk_id.value) {
    return std::nullopt;
  }
  return it->byte_range;
}
// Decodes `input` according to `encoding`: raw data is returned unchanged;
// otherwise the data is zlib-decoded.  Returns an error if decompression
// fails.
Result<absl::Cord> DecodeData(const absl::Cord& input,
                              ShardingSpec::DataEncoding encoding) {
  if (encoding != ShardingSpec::DataEncoding::raw) {
    absl::Cord decoded;
    TENSORSTORE_RETURN_IF_ERROR(zlib::Decode(input, &decoded, true));
    return decoded;
  }
  return input;
}
// Decodes a single 16-byte shard index entry: two little-endian uint64
// values giving the inclusive-min and exclusive-max of a byte range.
// Returns an error if the input size or range is invalid.
Result<ByteRange> DecodeShardIndexEntry(std::string_view input) {
  if (input.size() != 16) {
    return absl::FailedPreconditionError(tensorstore::StrCat(
        "Expected 16 bytes, but received: ", input.size(), " bytes"));
  }
  ByteRange byte_range;
  byte_range.inclusive_min = absl::little_endian::Load64(input.data());
  byte_range.exclusive_max = absl::little_endian::Load64(input.data() + 8);
  if (byte_range.SatisfiesInvariants()) {
    return byte_range;
  }
  return absl::FailedPreconditionError(tensorstore::StrCat(
      "Shard index specified invalid byte range: ", byte_range));
}
// Decodes a minishard index and then converts each entry's relative byte
// range into an absolute byte range within the shard via
// GetAbsoluteShardByteRange.  Errors from the conversion are annotated with
// the offending chunk id.
Result<std::vector<MinishardIndexEntry>>
DecodeMinishardIndexAndAdjustByteRanges(const absl::Cord& encoded,
                                        const ShardingSpec& sharding_spec) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto minishard_index,
      DecodeMinishardIndex(encoded, sharding_spec.minishard_index_encoding));
  for (auto& entry : minishard_index) {
    auto result = GetAbsoluteShardByteRange(entry.byte_range, sharding_spec);
    if (!result.ok()) {
      return MaybeAnnotateStatus(
          result.status(),
          tensorstore::StrCat("Error decoding minishard index entry for chunk ",
                              entry.chunk_id.value));
    }
    entry.byte_range = std::move(result).value();
  }
  return minishard_index;
}
namespace {
// Extracts the encoded chunks of a single minishard from `shard_data`,
// appending them to `chunks`.  `minishard_index` must already contain
// absolute byte ranges (see DecodeMinishardIndexAndAdjustByteRanges).
// Returns an error if a chunk id is duplicated or a byte range is out of
// bounds.
absl::Status SplitMinishard(const ShardingSpec& sharding_spec,
                            const absl::Cord& shard_data, uint64_t minishard,
                            span<const MinishardIndexEntry> minishard_index,
                            std::vector<EncodedChunk>& chunks) {
  // The index is sorted by chunk id, so duplicates are adjacent.
  std::optional<ChunkId> prev_chunk_id;
  for (const auto& existing_entry : minishard_index) {
    if (prev_chunk_id &&
        existing_entry.chunk_id.value == prev_chunk_id->value) {
      return absl::FailedPreconditionError(
          tensorstore::StrCat("Chunk ", existing_entry.chunk_id.value,
                              " occurs more than once in the minishard index "
                              "for minishard ",
                              minishard));
    }
    prev_chunk_id = existing_entry.chunk_id;
    // Validates that the entry's byte range lies within the shard.
    const auto GetChunkByteRange = [&]() -> Result<ByteRange> {
      TENSORSTORE_RETURN_IF_ERROR(
          OptionalByteRangeRequest(existing_entry.byte_range)
              .Validate(shard_data.size()));
      return existing_entry.byte_range;
    };
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto chunk_byte_range, GetChunkByteRange(),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat("Invalid existing byte range for chunk ",
                                   existing_entry.chunk_id.value)));
    chunks.push_back(
        EncodedChunk{{minishard, existing_entry.chunk_id},
                     internal::GetSubCord(shard_data, chunk_byte_range)});
  }
  return absl::OkStatus();
}
}
// Splits an entire encoded shard into its individual encoded chunks.
// Reads the shard index (16 bytes per minishard), decodes each non-empty
// minishard index, and collects the chunks of every minishard in order.
// Returns an empty vector for an empty shard.
Result<std::vector<EncodedChunk>> SplitShard(const ShardingSpec& sharding_spec,
                                             const absl::Cord& shard_data) {
  std::vector<EncodedChunk> chunks;
  if (shard_data.empty()) return chunks;
  const uint64_t num_minishards = sharding_spec.num_minishards();
  // The shard must at least contain the full shard index.
  if (shard_data.size() < num_minishards * 16) {
    return absl::FailedPreconditionError(
        tensorstore::StrCat("Existing shard has size ", shard_data.size(),
                            ", but expected at least: ", num_minishards * 16));
  }
  std::vector<char> shard_index(16 * num_minishards);
  internal::CopyCordToSpan(shard_data, shard_index);
  for (uint64_t minishard = 0; minishard < num_minishards; ++minishard) {
    // Decodes and validates the shard index entry for this minishard.
    const auto GetMinishardIndexByteRange = [&]() -> Result<ByteRange> {
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto minishard_index_byte_range,
          DecodeShardIndexEntry(
              std::string_view(shard_index.data() + 16 * minishard, 16)));
      TENSORSTORE_ASSIGN_OR_RETURN(
          minishard_index_byte_range,
          GetAbsoluteShardByteRange(minishard_index_byte_range, sharding_spec));
      TENSORSTORE_RETURN_IF_ERROR(
          OptionalByteRangeRequest(minishard_index_byte_range)
              .Validate(shard_data.size()));
      return minishard_index_byte_range;
    };
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto minishard_ibr, GetMinishardIndexByteRange(),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat(
                   "Error decoding existing shard index entry for minishard ",
                   minishard)));
    // An empty byte range means the minishard has no chunks.
    if (minishard_ibr.size() == 0) continue;
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto minishard_index,
        DecodeMinishardIndexAndAdjustByteRanges(
            internal::GetSubCord(shard_data, minishard_ibr), sharding_spec),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat(
                   "Error decoding existing minishard index for minishard ",
                   minishard)));
    TENSORSTORE_RETURN_IF_ERROR(SplitMinishard(
        sharding_spec, shard_data, minishard, minishard_index, chunks));
  }
  return chunks;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace zlib = tensorstore::zlib;
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::DecodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
// Verifies that `minishard_index` round trips through
// EncodeMinishardIndex/DecodeMinishardIndex, both as raw data and after
// zlib compression (gzip encoding).
void TestEncodeMinishardRoundTrip(
    std::vector<MinishardIndexEntry> minishard_index) {
  auto out = EncodeMinishardIndex(minishard_index);
  absl::Cord compressed;
  zlib::Options options{9, true};
  zlib::Encode(out, &compressed, options);
  EXPECT_THAT(
      DecodeMinishardIndex(out, ShardingSpec::DataEncoding::raw),
      ::testing::Optional(::testing::ElementsAreArray(minishard_index)));
  EXPECT_THAT(
      DecodeMinishardIndex(compressed, ShardingSpec::DataEncoding::gzip),
      ::testing::Optional(::testing::ElementsAreArray(minishard_index)));
}
// An empty index must round trip to an empty entry list.
TEST(DecodeMinishardIndexTest, Empty) {
  TestEncodeMinishardRoundTrip({});
}
// A single entry with a large chunk id round trips.
TEST(DecodeMinishardIndexTest, SingleEntry) {
  TestEncodeMinishardRoundTrip({{{0x0123456789abcdef}, {0x11, 0x23}}});
}
// Multiple entries (exercising delta encoding of ids and offsets) round trip.
TEST(DecodeMinishardIndexTest, MultipleEntries) {
  TestEncodeMinishardRoundTrip({
      {{1}, {3, 10}},
      {{7}, {12, 15}},
  });
}
// Non-zlib data with gzip encoding reports a decompression error.
TEST(DecodeMinishardIndexTest, InvalidGzip) {
  EXPECT_THAT(
      DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::gzip),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Error decoding zlib-compressed data"));
}
// Raw data whose length is not a multiple of 24 is rejected.
TEST(DecodeMinishardIndexTest, InvalidSizeRaw) {
  EXPECT_THAT(
      DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::raw),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Invalid minishard index length: 3"));
}
// Valid zlib data that decompresses to an invalid length is rejected.
TEST(DecodeMinishardIndexTest, InvalidSizeGzip) {
  absl::Cord temp;
  zlib::Options options{9, true};
  zlib::Encode(absl::Cord("abc"), &temp, options);
  EXPECT_THAT(DecodeMinishardIndex(temp, ShardingSpec::DataEncoding::gzip),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Invalid minishard index length: 3"));
}
// A byte range with exclusive_max < inclusive_min is rejected with a
// chunk-specific error message.
TEST(DecodeMinishardIndexTest, InvalidInterval) {
  std::vector<MinishardIndexEntry> minishard_index{{{3}, {1, 0}}};
  auto encoded = EncodeMinishardIndex(minishard_index);
  EXPECT_THAT(
      DecodeMinishardIndex(encoded, ShardingSpec::DataEncoding::raw),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          "Invalid byte range in minishard index for chunk 3: \\[1, 0\\)"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
25ce80e0-cfc7-41c8-8494-1706993cefd2 | cpp | google/tensorstore | neuroglancer_uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
using ::tensorstore::internal::ConvertInvalidArgumentToFailedPrecondition;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
// Read-only kvstore adapter whose keys are the binary representation of
// `ChunkCombinedShardInfo` (a shard/minishard pair) and whose values are the
// encoded minishard index retrieved from the underlying sharded store.
// Reads are batched per shard via MinishardIndexReadOperationState.
class MinishardIndexKeyValueStore : public kvstore::Driver {
 public:
  explicit MinishardIndexKeyValueStore(kvstore::DriverPtr base,
                                       Executor executor,
                                       std::string key_prefix,
                                       const ShardingSpec& sharding_spec)
      : base_(std::move(base)),
        executor_(std::move(executor)),
        key_prefix_(key_prefix),
        sharding_spec_(sharding_spec) {}
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  std::string DescribeKey(std::string_view key) override {
    ChunkCombinedShardInfo combined_info;
    if (key.size() != sizeof(combined_info)) {
      return tensorstore::StrCat("invalid key ", tensorstore::QuoteString(key));
    }
    std::memcpy(&combined_info, key.data(), sizeof(combined_info));
    auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
    return tensorstore::StrCat(
        "minishard ", split_info.minishard, " in ",
        base_->DescribeKey(
            GetShardKey(sharding_spec_, key_prefix_, split_info.shard)));
  }
  // No-op: garbage collection support is not required for this adapter.
  void GarbageCollectionVisit(
      garbage_collection::GarbageCollectionVisitor& visitor) const final {
  }
  kvstore::Driver* base() { return base_.get(); }
  const ShardingSpec& sharding_spec() { return sharding_spec_; }
  const std::string& key_prefix() const { return key_prefix_; }
  const Executor& executor() const { return executor_; }
  kvstore::DriverPtr base_;
  Executor executor_;
  std::string key_prefix_;
  ShardingSpec sharding_spec_;
};
namespace {
// Index types used as components of a minishard-index batch-read key.
using ShardIndex = uint64_t;
using MinishardIndex = uint64_t;

// Forward declaration needed by the base-class alias below.
class MinishardIndexReadOperationState;

// Batch-read entry keyed by (shard index, read generation conditions), with
// one pending request per minishard.
using MinishardIndexReadOperationStateBase =
    internal_kvstore_batch::BatchReadEntry<
        MinishardIndexKeyValueStore,
        internal_kvstore_batch::ReadRequest<MinishardIndex>,
        ShardIndex, kvstore::ReadGenerationConditions>;
// Performs a batched read of one or more minishard indices from a single
// shard.  For each requested minishard it:
//   1. reads the 16-byte shard index entry for that minishard,
//   2. decodes it into an absolute byte range within the shard, and
//   3. reads the minishard index bytes, conditioned on the generation seen
//      in step 1 (retrying via `retry_batch_` if the shard changed).
class MinishardIndexReadOperationState
    : public MinishardIndexReadOperationStateBase,
      public internal::AtomicReferenceCount<MinishardIndexReadOperationState> {
 public:
  explicit MinishardIndexReadOperationState(BatchEntryKey&& batch_entry_key_)
      : MinishardIndexReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<MinishardIndexReadOperationState>(
            /*initial_ref_count=*/1) {}

 private:
  // Batch used to re-issue requests whose conditional minishard-index read
  // was aborted because the shard generation changed.
  Batch retry_batch_{no_batch};
  void Submit(Batch::View batch) override {
    // Processing occurs on the driver's executor, not the caller's thread.
    const auto& executor = driver().executor();
    executor(
        [this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
  }
  void ProcessBatch(Batch batch) {
    // Adopt the initial reference created by the constructor.
    internal::IntrusivePtr<MinishardIndexReadOperationState> self(
        this, internal::adopt_object_ref);
    retry_batch_ = Batch::New();
    auto minishard_fetch_batch = Batch::New();
    for (auto& request : request_batch.requests) {
      ProcessMinishard(batch, request, minishard_fetch_batch);
    }
  }
  // Key of the underlying shard in the base kvstore.
  std::string ShardKey() {
    const auto& sharding_spec = driver().sharding_spec();
    return GetShardKey(sharding_spec, driver().key_prefix(),
                       std::get<ShardIndex>(batch_entry_key));
  }
  // Step 1: read the 16-byte shard index entry for this minishard.
  void ProcessMinishard(Batch::View batch, Request& request,
                        Batch minishard_fetch_batch) {
    kvstore::ReadOptions kvstore_read_options;
    kvstore_read_options.generation_conditions =
        std::get<kvstore::ReadGenerationConditions>(this->batch_entry_key);
    kvstore_read_options.staleness_bound = this->request_batch.staleness_bound;
    auto key = std::get<MinishardIndex>(request);
    // The shard index entry for minishard `key` occupies bytes
    // [key * 16, (key + 1) * 16) of the shard.
    kvstore_read_options.byte_range = OptionalByteRangeRequest{
        static_cast<int64_t>(key * 16), static_cast<int64_t>((key + 1) * 16)};
    kvstore_read_options.batch = batch;
    auto shard_index_read_future = this->driver().base()->Read(
        this->ShardKey(), std::move(kvstore_read_options));
    shard_index_read_future.Force();
    shard_index_read_future.ExecuteWhenReady(
        [self = internal::IntrusivePtr<MinishardIndexReadOperationState>(this),
         minishard_fetch_batch = std::move(minishard_fetch_batch),
         &request](ReadyFuture<kvstore::ReadResult> future) mutable {
          const auto& executor = self->driver().executor();
          executor([self = std::move(self), &request,
                    minishard_fetch_batch = std::move(minishard_fetch_batch),
                    future = std::move(future)] {
            OnShardIndexReady(std::move(self), request,
                              std::move(minishard_fetch_batch),
                              std::move(future.result()));
          });
        });
  }
  // Step 2: decode the shard index entry and issue the minishard-index read.
  static void OnShardIndexReady(
      internal::IntrusivePtr<MinishardIndexReadOperationState> self,
      Request& request, Batch minishard_fetch_batch,
      Result<kvstore::ReadResult>&& result) {
    auto& byte_range_request =
        std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
    const auto set_error = [&](absl::Status status) {
      byte_range_request.promise.SetResult(MaybeAnnotateStatus(
          ConvertInvalidArgumentToFailedPrecondition(std::move(status)),
          "Error retrieving shard index entry"));
    };
    TENSORSTORE_ASSIGN_OR_RETURN(auto&& read_result, result,
                                 set_error(std::move(_)));
    if (
        // Shard was modified since the constraint generation.
        read_result.aborted() ||
        // Shard does not exist implies the minishard is missing.
        read_result.not_found()) {
      byte_range_request.promise.SetResult(std::move(read_result));
      return;
    }
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto byte_range, DecodeShardIndexEntry(read_result.value.Flatten()),
        set_error(std::move(_)));
    TENSORSTORE_ASSIGN_OR_RETURN(
        byte_range,
        GetAbsoluteShardByteRange(byte_range, self->driver().sharding_spec()),
        set_error(std::move(_)));
    if (byte_range.size() == 0) {
      // Empty byte range: the minishard has no index, i.e. no chunks.
      read_result.value.Clear();
      read_result.state = kvstore::ReadResult::kMissing;
      byte_range_request.promise.SetResult(std::move(read_result));
      return;
    }
    kvstore::ReadOptions kvstore_read_options;
    // Condition the minishard-index read on the generation observed when
    // reading the shard index entry, so the two reads are consistent.
    kvstore_read_options.generation_conditions.if_equal =
        std::move(read_result.stamp.generation);
    kvstore_read_options.staleness_bound = self->request_batch.staleness_bound;
    kvstore_read_options.byte_range = byte_range;
    kvstore_read_options.batch = std::move(minishard_fetch_batch);
    auto read_future = self->driver().base()->Read(
        self->ShardKey(), std::move(kvstore_read_options));
    read_future.Force();
    read_future.ExecuteWhenReady(
        [self = std::move(self),
         &request](ReadyFuture<kvstore::ReadResult> future) mutable {
          const auto& executor = self->driver().executor();
          executor([self = std::move(self), &request,
                    future = std::move(future)]() mutable {
            self->OnMinishardIndexReadReady(request,
                                            std::move(future.result()));
          });
        });
  }
  // Step 3: deliver the minishard index bytes, or retry if the conditional
  // read was aborted because the shard changed concurrently.
  void OnMinishardIndexReadReady(Request& request,
                                 Result<kvstore::ReadResult>&& result) {
    auto& byte_range_request =
        std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto&& read_result, result,
        static_cast<void>(byte_range_request.promise.SetResult(
            internal::ConvertInvalidArgumentToFailedPrecondition(
                std::move(_)))));
    if (read_result.aborted()) {
      // Shard was modified between the shard-index read and the
      // minishard-index read; re-issue the request via `retry_batch_`.
      MakeRequest<MinishardIndexReadOperationState>(
          driver(), std::get<ShardIndex>(batch_entry_key),
          kvstore::ReadGenerationConditions(
              std::get<kvstore::ReadGenerationConditions>(batch_entry_key)),
          retry_batch_, read_result.stamp.time, std::move(request));
      return;
    }
    byte_range_request.promise.SetResult(std::move(read_result));
  }
};
}
// Reads the minishard index identified by `key` (a binary
// `ChunkCombinedShardInfo`).  Byte-range requests are not supported; the
// actual work is performed by a batched MinishardIndexReadOperationState.
Future<kvstore::ReadResult> MinishardIndexKeyValueStore::Read(
    Key key, ReadOptions options) {
  ChunkCombinedShardInfo combined_info;
  if (key.size() != sizeof(combined_info)) {
    return absl::InvalidArgumentError("Key does not specify a minishard");
  }
  std::memcpy(&combined_info, key.data(), sizeof(combined_info));
  auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
  if (options.byte_range != OptionalByteRangeRequest()) {
    // Only full-value reads make sense for a minishard index.
    return absl::InvalidArgumentError("Byte ranges not supported");
  }
  auto [promise, future] = PromiseFuturePair<ReadResult>::Make();
  MinishardIndexReadOperationState::MakeRequest<
      MinishardIndexReadOperationState>(
      *this, split_info.shard, std::move(options.generation_conditions),
      options.batch, options.staleness_bound,
      MinishardIndexReadOperationState::Request{{std::move(promise)},
                                                split_info.minishard});
  return std::move(future);
}
// Caches decoded minishard indices.  Entry keys are binary
// `ChunkCombinedShardInfo` values; entry data is the decoded index (with
// byte ranges already adjusted to be absolute within the shard).  Backed by
// a MinishardIndexKeyValueStore over the base kvstore.
class MinishardIndexCache
    : public internal::KvsBackedCache<MinishardIndexCache,
                                      internal::AsyncCache> {
  using Base =
      internal::KvsBackedCache<MinishardIndexCache, internal::AsyncCache>;

 public:
  using ReadData = std::vector<MinishardIndexEntry>;
  class Entry : public Base::Entry {
   public:
    using OwningCache = MinishardIndexCache;
    // Decodes this entry's key back into its (shard, minishard) pair.
    ChunkSplitShardInfo shard_info() {
      ChunkCombinedShardInfo combined_info;
      assert(this->key().size() == sizeof(combined_info));
      std::memcpy(&combined_info, this->key().data(), sizeof(combined_info));
      return GetSplitShardInfo(GetOwningCache(*this).sharding_spec(),
                               combined_info);
    }
    size_t ComputeReadDataSizeInBytes(const void* read_data) override {
      return internal::EstimateHeapUsage(
          *static_cast<const ReadData*>(read_data));
    }
    // Decodes the raw minishard index bytes on the executor; a missing
    // value decodes to a null ReadData.
    void DoDecode(std::optional<absl::Cord> value,
                  DecodeReceiver receiver) override {
      GetOwningCache(*this).executor()(
          [this, value = std::move(value),
           receiver = std::move(receiver)]() mutable {
            std::shared_ptr<ReadData> read_data;
            if (value) {
              if (auto result = DecodeMinishardIndexAndAdjustByteRanges(
                      *value, GetOwningCache(*this).sharding_spec());
                  result.ok()) {
                read_data = std::make_shared<ReadData>(*std::move(result));
              } else {
                execution::set_error(receiver,
                                     ConvertInvalidArgumentToFailedPrecondition(
                                         std::move(result).status()));
                return;
              }
            }
            execution::set_value(receiver, std::move(read_data));
          });
    }
  };
  Entry* DoAllocateEntry() final { return new Entry; }
  size_t DoGetSizeofEntry() final { return sizeof(Entry); }
  TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
    return new TransactionNode(static_cast<Entry&>(entry));
  }
  explicit MinishardIndexCache(kvstore::DriverPtr base_kvstore,
                               Executor executor, std::string key_prefix,
                               const ShardingSpec& sharding_spec)
      : Base(kvstore::DriverPtr(new MinishardIndexKeyValueStore(
            std::move(base_kvstore), executor, std::move(key_prefix),
            sharding_spec))) {}
  MinishardIndexKeyValueStore* kvstore_driver() {
    return static_cast<MinishardIndexKeyValueStore*>(
        this->Base::kvstore_driver());
  }
  const ShardingSpec& sharding_spec() {
    return kvstore_driver()->sharding_spec();
  }
  kvstore::Driver* base_kvstore_driver() { return kvstore_driver()->base(); }
  const Executor& executor() { return kvstore_driver()->executor(); }
  const std::string& key_prefix() { return kvstore_driver()->key_prefix(); }
};
// Decodes a 16-byte key into its minishard number and chunk id, each stored
// as a big-endian uint64.
MinishardAndChunkId GetMinishardAndChunkId(std::string_view key) {
  assert(key.size() == 16);
  const uint64_t minishard = absl::big_endian::Load64(key.data());
  const uint64_t chunk = absl::big_endian::Load64(key.data() + 8);
  return {minishard, {chunk}};
}
// Write-back cache for entire shards.  Each cache entry corresponds to one
// shard (key = big-endian shard index); the cached data is the shard's full
// list of encoded chunks.  Transactional writes to individual chunks are
// accumulated via AtomicMultiPhaseMutation and merged into a new shard on
// writeback.
class ShardedKeyValueStoreWriteCache
    : public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
                                      internal::AsyncCache> {
  using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
                                        internal::AsyncCache>;

 public:
  using ReadData = EncodedChunks;
  // Encodes a shard index as an 8-byte big-endian cache key.
  static std::string ShardToKey(ShardIndex shard) {
    std::string key;
    key.resize(sizeof(ShardIndex));
    absl::big_endian::Store64(key.data(), shard);
    return key;
  }
  static ShardIndex KeyToShard(std::string_view key) {
    assert(key.size() == sizeof(ShardIndex));
    return absl::big_endian::Load64(key.data());
  }
  class Entry : public Base::Entry {
   public:
    using OwningCache = ShardedKeyValueStoreWriteCache;
    ShardIndex shard() { return KeyToShard(key()); }
    size_t ComputeReadDataSizeInBytes(const void* data) override {
      return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
    }
    // Splits the raw shard bytes into its chunk list on the executor.
    void DoDecode(std::optional<absl::Cord> value,
                  DecodeReceiver receiver) override {
      GetOwningCache(*this).executor()(
          [this, value = std::move(value),
           receiver = std::move(receiver)]() mutable {
            EncodedChunks chunks;
            if (value) {
              if (auto result =
                      SplitShard(GetOwningCache(*this).sharding_spec(), *value);
                  result.ok()) {
                chunks = *std::move(result);
              } else {
                execution::set_error(receiver,
                                     ConvertInvalidArgumentToFailedPrecondition(
                                         std::move(result).status()));
                return;
              }
            }
            execution::set_value(
                receiver, std::make_shared<EncodedChunks>(std::move(chunks)));
          });
    }
    // Re-encodes the chunk list back into shard bytes.
    void DoEncode(std::shared_ptr<const EncodedChunks> data,
                  EncodeReceiver receiver) override {
      execution::set_value(
          receiver, EncodeShard(GetOwningCache(*this).sharding_spec(), *data));
    }
    std::string GetKeyValueStoreKey() override {
      auto& cache = GetOwningCache(*this);
      return GetShardKey(cache.sharding_spec(), cache.key_prefix(),
                         this->shard());
    }
  };
  class TransactionNode : public Base::TransactionNode,
                          public internal_kvstore::AtomicMultiPhaseMutation {
   public:
    using OwningCache = ShardedKeyValueStoreWriteCache;
    using Base::TransactionNode::TransactionNode;
    absl::Mutex& mutex() override { return this->mutex_; }
    void PhaseCommitDone(size_t next_phase) override {}
    internal::TransactionState::Node& GetTransactionNode() override {
      return *this;
    }
    void Abort() override {
      this->AbortRemainingPhases();
      Base::TransactionNode::Abort();
    }
    std::string DescribeKey(std::string_view key) override {
      auto& entry = GetOwningEntry(*this);
      auto& cache = GetOwningCache(entry);
      auto minishard_and_chunk_id = GetMinishardAndChunkId(key);
      return tensorstore::StrCat(
          "chunk ", minishard_and_chunk_id.chunk_id.value, " in minishard ",
          minishard_and_chunk_id.minishard, " in ",
          cache.kvstore_driver()->DescribeKey(entry.GetKeyValueStoreKey()));
    }
    void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
    void AllEntriesDone(
        internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
    // Records the first writeback error; later errors are dropped.
    void RecordEntryWritebackError(
        internal_kvstore::ReadModifyWriteEntry& entry,
        absl::Status error) override {
      absl::MutexLock lock(&mutex_);
      if (apply_status_.ok()) {
        apply_status_ = std::move(error);
      }
    }
    void Revoke() override {
      Base::TransactionNode::Revoke();
      // Acquire and immediately release the writer lock to synchronize with
      // any in-progress operations before revoking entries.
      { UniqueWriterLock(*this); }
      this->RevokeAllEntries();
    }
    void WritebackSuccess(ReadState&& read_state) override;
    void WritebackError() override;
    void InvalidateReadState() override;
    bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
    // Satisfies a read-modify-write read of a single chunk by reading the
    // whole shard (via the cache) and extracting the chunk from it.
    void Read(
        internal_kvstore::ReadModifyWriteEntry& entry,
        kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
        kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
      this->AsyncCache::TransactionNode::Read({options.staleness_bound})
          .ExecuteWhenReady(WithExecutor(
              GetOwningCache(*this).executor(),
              [&entry,
               if_not_equal =
                   std::move(options.generation_conditions.if_not_equal),
               receiver = std::move(receiver)](
                  ReadyFuture<const void> future) mutable {
                if (!future.result().ok()) {
                  execution::set_error(receiver, future.result().status());
                  return;
                }
                execution::submit(HandleShardReadSuccess(entry, if_not_equal),
                                  receiver);
              }));
    }
    // Extracts the chunk for `entry.key_` from the cached shard contents.
    static Result<kvstore::ReadResult> HandleShardReadSuccess(
        internal_kvstore::ReadModifyWriteEntry& entry,
        const StorageGeneration& if_not_equal) {
      auto& self = static_cast<TransactionNode&>(entry.multi_phase());
      TimestampedStorageGeneration stamp;
      std::shared_ptr<const EncodedChunks> encoded_chunks;
      {
        AsyncCache::ReadLock<EncodedChunks> lock{self};
        stamp = lock.stamp();
        encoded_chunks = lock.shared_data();
      }
      if (!StorageGeneration::IsUnknown(stamp.generation) &&
          stamp.generation == if_not_equal) {
        return kvstore::ReadResult::Unspecified(std::move(stamp));
      }
      if (StorageGeneration::IsDirty(stamp.generation)) {
        // Add a layer so per-chunk generations are distinguishable from the
        // shard-level generation.
        stamp.generation =
            StorageGeneration::AddLayer(std::move(stamp.generation));
      }
      auto* chunk =
          FindChunk(*encoded_chunks, GetMinishardAndChunkId(entry.key_));
      if (!chunk) {
        return kvstore::ReadResult::Missing(std::move(stamp));
      } else {
        TENSORSTORE_ASSIGN_OR_RETURN(
            absl::Cord value,
            DecodeData(chunk->encoded_data,
                       GetOwningCache(self).sharding_spec().data_encoding));
        return kvstore::ReadResult::Value(std::move(value), std::move(stamp));
      }
    }
    // Encodes a chunk value per the sharding spec before the generic
    // atomic-mutation writeback records it.
    void Writeback(internal_kvstore::ReadModifyWriteEntry& entry,
                   internal_kvstore::ReadModifyWriteEntry& source_entry,
                   kvstore::ReadResult&& read_result) override {
      auto& value = read_result.value;
      if (read_result.state == kvstore::ReadResult::kValue) {
        value = EncodeData(value,
                           GetOwningCache(*this).sharding_spec().data_encoding);
      }
      internal_kvstore::AtomicMultiPhaseMutation::Writeback(
          entry, entry, std::move(read_result));
    }
    ApplyReceiver apply_receiver_;
    ApplyOptions apply_options_;
    absl::Status apply_status_;
  };
  Entry* DoAllocateEntry() final { return new Entry; }
  size_t DoGetSizeofEntry() final { return sizeof(Entry); }
  TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
    return new TransactionNode(static_cast<Entry&>(entry));
  }
  explicit ShardedKeyValueStoreWriteCache(
      internal::CachePtr<MinishardIndexCache> minishard_index_cache,
      GetMaxChunksPerShardFunction get_max_chunks_per_shard)
      : Base(kvstore::DriverPtr(minishard_index_cache->base_kvstore_driver())),
        minishard_index_cache_(std::move(minishard_index_cache)),
        get_max_chunks_per_shard_(std::move(get_max_chunks_per_shard)) {}
  const ShardingSpec& sharding_spec() const {
    return minishard_index_cache()->sharding_spec();
  }
  const std::string& key_prefix() const {
    return minishard_index_cache()->key_prefix();
  }
  const internal::CachePtr<MinishardIndexCache>& minishard_index_cache() const {
    return minishard_index_cache_;
  }
  const Executor& executor() { return minishard_index_cache()->executor(); }
  internal::CachePtr<MinishardIndexCache> minishard_index_cache_;
  GetMaxChunksPerShardFunction get_max_chunks_per_shard_;
};
// Invalidates both the cache-level read state and the per-entry read states
// tracked by the atomic multi-phase mutation.
void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() {
  Base::TransactionNode::InvalidateReadState();
  internal_kvstore::InvalidateReadState(phases_);
}
// Propagates a successful shard writeback to every buffered chunk entry,
// then tears down the phase bookkeeping.
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess(
    ReadState&& read_state) {
  for (auto& entry : phases_.entries_) {
    internal_kvstore::WritebackSuccess(
        static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry),
        read_state.stamp);
  }
  internal_kvstore::DestroyPhaseEntries(phases_);
  Base::TransactionNode::WritebackSuccess(std::move(read_state));
}
// Propagates a failed shard writeback to every buffered chunk entry, then
// tears down the phase bookkeeping.
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() {
  internal_kvstore::WritebackError(phases_);
  internal_kvstore::DestroyPhaseEntries(phases_);
  Base::TransactionNode::WritebackError();
}
namespace {
// (Re)starts the apply process by re-running the atomic writeback of all
// buffered entries with the node's current staleness bound.
void StartApply(ShardedKeyValueStoreWriteCache::TransactionNode& node) {
  node.RetryAtomicWriteback(node.apply_options_.staleness_bound);
}
// Merges the node's buffered chunk mutations with the existing shard
// contents (if `conditional`) to produce the new shard read state delivered
// to `apply_receiver_`.  Buffered entries and existing chunks are both
// ordered by (minishard, chunk id), so this is a single linear merge.  If
// any entry's generation no longer matches the shard generation, the apply
// is restarted with a fresh staleness bound.
void MergeForWriteback(ShardedKeyValueStoreWriteCache::TransactionNode& node,
                       bool conditional) {
  TimestampedStorageGeneration stamp;
  std::shared_ptr<const EncodedChunks> shared_existing_chunks;
  span<const EncodedChunk> existing_chunks;
  if (conditional) {
    // Merge against the currently-cached shard contents.
    auto lock = internal::AsyncCache::ReadLock<EncodedChunks>{node};
    stamp = lock.stamp();
    shared_existing_chunks = lock.shared_data();
    existing_chunks = *shared_existing_chunks;
  } else {
    // Unconditional: all chunks come from the buffered mutations.
    stamp = TimestampedStorageGeneration::Unconditional();
  }
  std::vector<EncodedChunk> chunks;
  size_t existing_index = 0;
  bool mismatch = false;
  bool changed = false;
  for (auto& entry : node.phases_.entries_) {
    auto& buffered_entry =
        static_cast<internal_kvstore::AtomicMultiPhaseMutation::
                        BufferedReadModifyWriteEntry&>(entry);
    auto& entry_stamp = buffered_entry.stamp();
    if (StorageGeneration::IsConditional(entry_stamp.generation) &&
        StorageGeneration::Clean(entry_stamp.generation) !=
            StorageGeneration::Clean(stamp.generation)) {
      // Entry was conditioned on a different shard generation; must retry.
      mismatch = true;
      break;
    }
    if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified ||
        !StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) {
      // Entry leaves its chunk unchanged; skip it.
      continue;
    }
    auto minishard_and_chunk_id = GetMinishardAndChunkId(buffered_entry.key_);
    // Copy existing chunks that precede this mutation; drop (overwrite) an
    // existing chunk with the same id.
    while (existing_index < static_cast<size_t>(existing_chunks.size())) {
      auto& existing_chunk = existing_chunks[existing_index];
      if (existing_chunk.minishard_and_chunk_id < minishard_and_chunk_id) {
        chunks.push_back(existing_chunk);
        ++existing_index;
      } else if (existing_chunk.minishard_and_chunk_id ==
                 minishard_and_chunk_id) {
        changed = true;
        ++existing_index;
        break;
      } else {
        break;
      }
    }
    if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) {
      chunks.push_back(
          EncodedChunk{minishard_and_chunk_id, buffered_entry.value_});
      changed = true;
    }
  }
  if (mismatch) {
    node.apply_options_.staleness_bound = absl::Now();
    StartApply(node);
    return;
  }
  // Append any remaining existing chunks past the last mutation.
  chunks.insert(chunks.end(), existing_chunks.begin() + existing_index,
                existing_chunks.end());
  internal::AsyncCache::ReadState update;
  update.stamp = std::move(stamp);
  if (changed) {
    update.stamp.generation.MarkDirty();
  }
  update.data = std::make_shared<EncodedChunks>(std::move(chunks));
  execution::set_value(std::exchange(node.apply_receiver_, {}),
                       std::move(update));
}
}
// Begins applying the transaction: stores the receiver/options, clears any
// prior error, and kicks off the writeback sequence on the executor.
void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply(
    ApplyOptions options, ApplyReceiver receiver) {
  apply_receiver_ = std::move(receiver);
  apply_options_ = options;
  apply_status_ = absl::Status();
  GetOwningCache(*this).executor()([this] { StartApply(*this); });
}
// Called once every buffered chunk mutation has produced its writeback
// value.  Validates that all conditional entries agree on a single shard
// generation (otherwise restarts the apply), then merges the mutations with
// the existing shard contents — skipping the existing-shard read only when
// every chunk of the shard is being overwritten unconditionally.
void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone(
    internal_kvstore::SinglePhaseMutation& single_phase_mutation) {
  if (!apply_status_.ok()) {
    execution::set_error(std::exchange(apply_receiver_, {}),
                         std::exchange(apply_status_, {}));
    return;
  }
  auto& self = *this;
  GetOwningCache(*this).executor()([&self] {
    TimestampedStorageGeneration stamp;
    bool mismatch = false;
    bool modified = false;
    size_t num_chunks = 0;
    // Scan entries to count modifications and find a common conditioning
    // generation (if any).
    for (auto& entry : self.phases_.entries_) {
      auto& buffered_entry =
          static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>(
              entry);
      if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) {
        modified = true;
        ++num_chunks;
      }
      auto& entry_stamp = buffered_entry.stamp();
      if (StorageGeneration::IsConditional(entry_stamp.generation)) {
        if (!StorageGeneration::IsUnknown(stamp.generation) &&
            StorageGeneration::Clean(stamp.generation) !=
                StorageGeneration::Clean(entry_stamp.generation)) {
          // Two entries conditioned on different generations: retry.
          mismatch = true;
          break;
        } else {
          stamp = entry_stamp;
        }
      }
    }
    if (mismatch) {
      self.apply_options_.staleness_bound = absl::Now();
      StartApply(self);
      return;
    }
    auto& cache = GetOwningCache(self);
    if (!modified && StorageGeneration::IsUnknown(stamp.generation) &&
        self.apply_options_.apply_mode !=
            ApplyOptions::ApplyMode::kSpecifyUnchanged) {
      // Nothing changed and no conditions: report an unconditional no-op.
      internal::AsyncCache::ReadState update;
      update.stamp = TimestampedStorageGeneration::Unconditional();
      execution::set_value(std::exchange(self.apply_receiver_, {}),
                           std::move(update));
      return;
    }
    if (!StorageGeneration::IsUnknown(stamp.generation) ||
        !cache.get_max_chunks_per_shard_ ||
        cache.get_max_chunks_per_shard_(GetOwningEntry(self).shard()) !=
            num_chunks) {
      // Existing shard contents are needed: read them, then merge.
      self.internal::AsyncCache::TransactionNode::Read(
          {self.apply_options_.staleness_bound})
          .ExecuteWhenReady([&self](ReadyFuture<const void> future) {
            if (!future.result().ok()) {
              execution::set_error(std::exchange(self.apply_receiver_, {}),
                                   future.result().status());
              return;
            }
            GetOwningCache(self).executor()(
                [&self] { MergeForWriteback(self, true); });
          });
      return;
    }
    // All chunks of the shard are written unconditionally: no read needed.
    MergeForWriteback(self, false);
  });
}
// Decodes `key` into a ChunkId, returning an InvalidArgument error (with the
// quoted key) when the key is not a valid 8-byte chunk key.
Result<ChunkId> KeyToChunkIdOrError(std::string_view key) {
  auto chunk_id = KeyToChunkId(key);
  if (!chunk_id) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Invalid key: ", tensorstore::QuoteString(key)));
  }
  return *chunk_id;
}
}
// JSON-serializable representation of a sharded kvstore driver: the base
// kvstore spec, the sharding metadata, and the context resources used for
// caching and copy concurrency.
struct ShardedKeyValueStoreSpecData {
  Context::Resource<internal::CachePoolResource> cache_pool;
  Context::Resource<internal::DataCopyConcurrencyResource>
      data_copy_concurrency;
  // Underlying kvstore in which shard files are stored.
  kvstore::Spec base;
  // Neuroglancer uint64 sharding parameters.
  ShardingSpec metadata;
  TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData,
                                          internal_json_binding::NoOptions,
                                          IncludeDefaults,
                                          ::nlohmann::json::object_t)
  // Enables serialization/garbage-collection traversal of all members.
  constexpr static auto ApplyMembers = [](auto&& x, auto f) {
    return f(x.cache_pool, x.data_copy_concurrency, x.base, x.metadata);
  };
};
namespace jb = ::tensorstore::internal_json_binding;
// JSON binder: maps "base" (normalized to a directory path), "metadata"
// (defaulted), and the two context resource members.
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
    ShardedKeyValueStoreSpecData,
    jb::Object(
        jb::Member("base",
                   jb::Projection<&ShardedKeyValueStoreSpecData::base>()),
        jb::Initialize([](auto* obj) {
          internal::EnsureDirectoryPath(obj->base.path);
          return absl::OkStatus();
        }),
        jb::Member("metadata",
                   jb::Projection<&ShardedKeyValueStoreSpecData::metadata>(
                       jb::DefaultInitializedValue())),
        jb::Member(internal::CachePoolResource::id,
                   jb::Projection<&ShardedKeyValueStoreSpecData::cache_pool>()),
        jb::Member(
            internal::DataCopyConcurrencyResource::id,
            jb::Projection<
                &ShardedKeyValueStoreSpecData::data_copy_concurrency>())));
// Registered driver spec for the "neuroglancer_uint64_sharded" kvstore
// driver; `DoOpen` (defined below) constructs the driver from the spec data.
class ShardedKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<
          ShardedKeyValueStoreSpec, ShardedKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "neuroglancer_uint64_sharded";
  Future<kvstore::DriverPtr> DoOpen() const override;
  // Returns the underlying (base) kvstore spec; `path` is unused because the
  // adapter exposes a flat 8-byte-key space.
  Result<kvstore::Spec> GetBase(std::string_view path) const override {
    return data_.base;
  }
};
// Key-value store adapter that exposes uint64 chunk ids (encoded as 8-byte
// big-endian keys) on top of a base kvstore holding neuroglancer
// "uint64_sharded" shard files.  Reads go through `ReadOperationState`
// (defined below); writes go through the transactional write cache.
class ShardedKeyValueStore
    : public internal_kvstore::RegisteredDriver<ShardedKeyValueStore,
                                                ShardedKeyValueStoreSpec> {
 public:
  // Constructs the layered caches: a MinishardIndexCache over the base
  // kvstore, wrapped by a ShardedKeyValueStoreWriteCache.
  explicit ShardedKeyValueStore(
      kvstore::DriverPtr base_kvstore, Executor executor,
      std::string key_prefix, const ShardingSpec& sharding_spec,
      internal::CachePool::WeakPtr cache_pool,
      GetMaxChunksPerShardFunction get_max_chunks_per_shard = {})
      : write_cache_(internal::GetCache<ShardedKeyValueStoreWriteCache>(
            cache_pool.get(), "",
            [&] {
              return std::make_unique<ShardedKeyValueStoreWriteCache>(
                  internal::GetCache<MinishardIndexCache>(
                      cache_pool.get(), "",
                      [&] {
                        return std::make_unique<MinishardIndexCache>(
                            std::move(base_kvstore), std::move(executor),
                            std::move(key_prefix), sharding_spec);
                      }),
                  std::move(get_max_chunks_per_shard));
            })),
        is_raw_encoding_(sharding_spec.data_encoding ==
                         ShardingSpec::DataEncoding::raw) {}
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  // Lists all chunk keys by reading every shard through the write cache and
  // enumerating its encoded chunks.
  void ListImpl(ListOptions options, ListReceiver receiver) override {
    // Shared state keeping the receiver alive until all shard reads finish;
    // the destructor delivers done/error and stopping.
    struct State {
      ListReceiver receiver_;
      Promise<void> promise_;
      Future<void> future_;
      ListOptions options_;
      explicit State(ListReceiver&& receiver, ListOptions&& options)
          : receiver_(std::move(receiver)), options_(std::move(options)) {
        auto [promise, future] = PromiseFuturePair<void>::Make(MakeResult());
        this->promise_ = std::move(promise);
        this->future_ = std::move(future);
        future_.Force();
        // Cancellation from the receiver marks the shared promise cancelled.
        execution::set_starting(receiver_, [promise = promise_] {
          promise.SetResult(absl::CancelledError(""));
        });
      }
      ~State() {
        auto& r = promise_.raw_result();
        if (r.ok()) {
          execution::set_done(receiver_);
        } else {
          execution::set_error(receiver_, r.status());
        }
        execution::set_stopping(receiver_);
      }
    };
    auto state =
        std::make_shared<State>(std::move(receiver), std::move(options));
    ShardIndex num_shards = ShardIndex{1} << sharding_spec().shard_bits;
    for (ShardIndex shard = 0; shard < num_shards; ++shard) {
      auto entry = GetCacheEntry(
          write_cache_, ShardedKeyValueStoreWriteCache::ShardToKey(shard));
      LinkValue(
          [state, entry, is_raw_encoding = is_raw_encoding_](
              Promise<void> promise, ReadyFuture<const void> future) {
            auto chunks = internal::AsyncCache::ReadLock<EncodedChunks>(*entry)
                              .shared_data();
            if (!chunks) return;
            for (auto& chunk : *chunks) {
              auto key = ChunkIdToKey(chunk.minishard_and_chunk_id.chunk_id);
              if (!Contains(state->options_.range, key)) continue;
              key.erase(0, state->options_.strip_prefix_length);
              // Sizes are only known without decoding for raw encoding.
              execution::set_value(
                  state->receiver_,
                  ListEntry{
                      std::move(key),
                      is_raw_encoding
                          ? ListEntry::checked_size(chunk.encoded_data.size())
                          : -1,
                  });
            }
          },
          state->promise_, entry->Read({absl::InfiniteFuture()}));
    }
  }
  // Non-transactional writes are implemented via an implicit transaction.
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override {
    return internal_kvstore::WriteViaTransaction(
        this, std::move(key), std::move(value), std::move(options));
  }
  absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
                               size_t& phase, Key key,
                               ReadModifyWriteSource& source) override {
    TENSORSTORE_ASSIGN_OR_RETURN(ChunkId chunk_id, KeyToChunkIdOrError(key));
    const auto& sharding_spec = this->sharding_spec();
    const auto shard_info = GetSplitShardInfo(
        sharding_spec, GetChunkShardInfo(sharding_spec, chunk_id));
    const ShardIndex shard = shard_info.shard;
    auto entry = GetCacheEntry(
        write_cache_, ShardedKeyValueStoreWriteCache::ShardToKey(shard));
    // Within a shard, entries are keyed by (minishard, chunk_id), both
    // big-endian so ordering matches the on-disk layout.
    std::string key_within_shard;
    key_within_shard.resize(16);
    absl::big_endian::Store64(key_within_shard.data(), shard_info.minishard);
    absl::big_endian::Store64(key_within_shard.data() + 8, chunk_id.value);
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto node, GetWriteLockedTransactionNode(*entry, transaction));
    node->ReadModifyWrite(phase, std::move(key_within_shard), source);
    if (!transaction) {
      // An implicit transaction was created; hand ownership to the caller.
      transaction.reset(node.unlock()->transaction());
    }
    return absl::OkStatus();
  }
  absl::Status TransactionalDeleteRange(
      const internal::OpenTransactionPtr& transaction,
      KeyRange range) override {
    return absl::UnimplementedError("DeleteRange not supported");
  }
  std::string DescribeKey(std::string_view key) override {
    auto chunk_id_opt = KeyToChunkId(key);
    if (!chunk_id_opt) {
      return tensorstore::StrCat("invalid key ", tensorstore::QuoteString(key));
    }
    const auto& sharding_spec = this->sharding_spec();
    const auto shard_info = GetSplitShardInfo(
        sharding_spec, GetChunkShardInfo(sharding_spec, *chunk_id_opt));
    return tensorstore::StrCat(
        "chunk ", chunk_id_opt->value, " in minishard ", shard_info.minishard,
        " in ",
        base_kvstore_driver()->DescribeKey(
            GetShardKey(sharding_spec, key_prefix(), shard_info.shard)));
  }
  SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return base_kvstore_driver()->GetSupportedFeatures(
        KeyRange::Prefix(key_prefix()));
  }
  Result<KvStore> GetBase(std::string_view path,
                          const Transaction& transaction) const override {
    return KvStore(kvstore::DriverPtr(base_kvstore_driver()), key_prefix(),
                   transaction);
  }
  // Accessors forwarding to the underlying minishard index cache.
  kvstore::Driver* base_kvstore_driver() const {
    return minishard_index_cache()->base_kvstore_driver();
  }
  const ShardingSpec& sharding_spec() const {
    return minishard_index_cache()->sharding_spec();
  }
  const Executor& executor() const {
    return minishard_index_cache()->executor();
  }
  const std::string& key_prefix() const {
    return minishard_index_cache()->key_prefix();
  }
  const internal::CachePtr<MinishardIndexCache>& minishard_index_cache() const {
    return write_cache_->minishard_index_cache_;
  }
  absl::Status GetBoundSpecData(ShardedKeyValueStoreSpecData& spec) const;
  internal::CachePtr<ShardedKeyValueStoreWriteCache> write_cache_;
  // Only populated when opened from a spec; required for GetBoundSpecData.
  Context::Resource<internal::CachePoolResource> cache_pool_resource_;
  Context::Resource<internal::DataCopyConcurrencyResource>
      data_copy_concurrency_resource_;
  bool is_raw_encoding_ = false;
};
namespace {
class ReadOperationState;
// Batch-entry base: groups read requests by (driver, shard index), where each
// request carries its (minishard, chunk id) key and generation conditions.
using ReadOperationStateBase = internal_kvstore_batch::BatchReadEntry<
    ShardedKeyValueStore,
    internal_kvstore_batch::ReadRequest<MinishardAndChunkId,
                                        kvstore::ReadGenerationConditions>,
    ShardIndex>;
// Coordinates all reads targeting a single shard within one batch: either
// reads the entire shard at once (when the requests are known to cover every
// chunk), or resolves minishard indices and then issues per-chunk or
// per-byte-range reads against the base kvstore.
class ReadOperationState
    : public ReadOperationStateBase,
      public internal::AtomicReferenceCount<ReadOperationState> {
 public:
  explicit ReadOperationState(BatchEntryKey&& batch_entry_key_)
      : ReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<ReadOperationState>(
            /*initial_ref_count=*/1) {}

 private:
  // Batch used to re-issue requests that abort due to generation mismatch.
  Batch retry_batch_{no_batch};
  void Submit(Batch::View batch) override {
    const auto& executor = driver().executor();
    executor(
        [this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
  }
  // Sorts requests by (minishard, chunk id) and dispatches them either as a
  // whole-shard read or minishard-by-minishard.
  void ProcessBatch(Batch batch) {
    // Adopt the initial reference; `self` keeps this alive for the duration.
    internal::IntrusivePtr<ReadOperationState> self(this,
                                                    internal::adopt_object_ref);
    span<Request> requests = request_batch.requests;
    std::sort(request_batch.requests.begin(), request_batch.requests.end(),
              [](const Request& a, const Request& b) {
                return std::get<MinishardAndChunkId>(a) <
                       std::get<MinishardAndChunkId>(b);
              });
    if (ShouldReadEntireShard()) {
      ReadEntireShard(std::move(self), std::move(batch));
      return;
    }
    retry_batch_ = Batch::New();
    Batch data_fetch_batch{no_batch};
    // Group consecutive (sorted) requests by minishard.
    for (size_t minishard_start_i = 0; minishard_start_i < requests.size();) {
      size_t minishard_end_i = minishard_start_i + 1;
      auto minishard =
          std::get<MinishardAndChunkId>(requests[minishard_start_i]).minishard;
      while (
          minishard_end_i < requests.size() &&
          std::get<MinishardAndChunkId>(requests[minishard_end_i]).minishard ==
              minishard) {
        ++minishard_end_i;
      }
      ProcessMinishard(batch, minishard,
                       requests.subspan(minishard_start_i,
                                        minishard_end_i - minishard_start_i),
                       data_fetch_batch);
      minishard_start_i = minishard_end_i;
    }
  }
  // Returns true if the batched requests provably cover every chunk of the
  // shard with identical generation conditions and full byte ranges, in which
  // case a single whole-shard read is cheaper.
  bool ShouldReadEntireShard() {
    const auto& get_max_chunks_per_shard =
        driver().write_cache_->get_max_chunks_per_shard_;
    if (!get_max_chunks_per_shard) return false;
    uint64_t max_chunks =
        get_max_chunks_per_shard(std::get<ShardIndex>(batch_entry_key));
    if (request_batch.requests.size() < max_chunks) {
      // Can't possibly cover all chunks.
      return false;
    }
    const auto& first_request = request_batch.requests[0];
    uint64_t num_chunks_covered = 0;
    std::optional<uint64_t> prev_chunk_covered;
    for (const auto& request : request_batch.requests) {
      if (std::get<kvstore::ReadGenerationConditions>(request) !=
          std::get<kvstore::ReadGenerationConditions>(first_request)) {
        // Generation conditions must be identical for a single shard read.
        return false;
      }
      if (std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
              .byte_range.IsFull()) {
        uint64_t chunk_id =
            std::get<MinishardAndChunkId>(request).chunk_id.value;
        // Requests are sorted, so duplicates are adjacent.
        if (chunk_id != prev_chunk_covered) {
          prev_chunk_covered = chunk_id;
          ++num_chunks_covered;
        }
      }
    }
    return (num_chunks_covered == max_chunks);
  }
  std::string ShardKey() {
    const auto& sharding_spec = driver().sharding_spec();
    return GetShardKey(sharding_spec, driver().key_prefix(),
                       std::get<ShardIndex>(batch_entry_key));
  }
  // Issues a single read of the full shard file and completes all requests
  // from its contents.
  static void ReadEntireShard(internal::IntrusivePtr<ReadOperationState> self,
                              Batch batch) {
    auto& first_request = self->request_batch.requests[0];
    kvstore::ReadOptions read_options;
    read_options.batch = std::move(batch);
    // All requests share the same conditions (checked by
    // ShouldReadEntireShard), so the first request's conditions apply.
    read_options.generation_conditions =
        std::move(std::get<kvstore::ReadGenerationConditions>(first_request));
    read_options.staleness_bound = self->request_batch.staleness_bound;
    auto& driver = self->driver();
    auto read_future = driver.base_kvstore_driver()->Read(
        GetShardKey(driver.sharding_spec(), driver.key_prefix(),
                    std::get<ShardIndex>(self->batch_entry_key)),
        std::move(read_options));
    read_future.Force();
    std::move(read_future)
        .ExecuteWhenReady([self = std::move(self)](
                              ReadyFuture<kvstore::ReadResult> future) mutable {
          const auto& executor = self->driver().executor();
          executor([self = std::move(self), future = std::move(future)] {
            OnEntireShardReady(std::move(self), std::move(future.result()));
          });
        });
  }
  // Splits the shard into chunks and resolves each sorted request against the
  // (also sorted) chunk list; unmatched requests complete as "missing".
  static void OnEntireShardReady(
      internal::IntrusivePtr<ReadOperationState> self,
      Result<kvstore::ReadResult>&& result) {
    if (!result.ok() || !result->has_value()) {
      internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
                                              std::move(result));
      return;
    }
    auto& read_result = *result;
    const auto& sharding_spec = self->driver().sharding_spec();
    TENSORSTORE_ASSIGN_OR_RETURN(auto encoded_chunks,
                                 SplitShard(sharding_spec, read_result.value),
                                 internal_kvstore_batch::SetCommonResult(
                                     self->request_batch.requests, _));
    span<Request> requests = self->request_batch.requests;
    size_t request_i = 0;
    const auto complete_not_found = [&](Request& request) {
      std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
          .promise.SetResult(kvstore::ReadResult::Missing(read_result.stamp));
    };
    for (const auto& encoded_chunk : encoded_chunks) {
      auto decoded_data_result =
          DecodeData(encoded_chunk.encoded_data, sharding_spec.data_encoding);
      const auto complete_request =
          [&](Request& request) -> Result<kvstore::ReadResult> {
        // A decode failure is a corruption/precondition error, not an
        // invalid-argument error from the caller's perspective.
        TENSORSTORE_ASSIGN_OR_RETURN(
            const auto& decoded_data, decoded_data_result,
            internal::ConvertInvalidArgumentToFailedPrecondition(_));
        TENSORSTORE_ASSIGN_OR_RETURN(
            auto validated_byte_range,
            std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
                .byte_range.Validate(decoded_data.size()));
        kvstore::ReadResult request_read_result;
        request_read_result.stamp = read_result.stamp;
        request_read_result.state = kvstore::ReadResult::kValue;
        request_read_result.value =
            internal::GetSubCord(decoded_data, validated_byte_range);
        return request_read_result;
      };
      auto decoded_key = encoded_chunk.minishard_and_chunk_id;
      // Merge-join of sorted requests against sorted chunks.
      for (; request_i < requests.size(); ++request_i) {
        auto& request = requests[request_i];
        auto request_key = std::get<MinishardAndChunkId>(request);
        if (request_key < decoded_key) {
          complete_not_found(request);
        } else if (request_key == decoded_key) {
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
              .promise.SetResult(complete_request(request));
        } else {
          break;
        }
      }
    }
    // Any requests past the last chunk are missing.
    for (; request_i < requests.size(); ++request_i) {
      complete_not_found(requests[request_i]);
    }
  }
  // Reads the minishard index for `minishard` (possibly joining `batch`),
  // then continues with OnMinishardIndexReady for the grouped `requests`.
  void ProcessMinishard(Batch::View batch, MinishardIndex minishard,
                        span<Request> requests, Batch& data_fetch_batch) {
    ChunkSplitShardInfo split_shard_info;
    split_shard_info.shard = std::get<ShardIndex>(batch_entry_key);
    split_shard_info.minishard = minishard;
    auto shard_info =
        GetCombinedShardInfo(driver().sharding_spec(), split_shard_info);
    // The cache key is the raw bytes of the combined shard info.
    auto minishard_index_cache_entry = GetCacheEntry(
        driver().minishard_index_cache(),
        std::string_view(reinterpret_cast<const char*>(&shard_info),
                         sizeof(shard_info)));
    auto minishard_index_read_future = minishard_index_cache_entry->Read(
        {request_batch.staleness_bound, batch});
    // Choose the batch for the subsequent data reads: reuse the current batch
    // if the index is already cached, otherwise use a shared deferred batch.
    Batch successor_batch{no_batch};
    if (batch) {
      if (minishard_index_read_future.ready()) {
        successor_batch = batch;
      } else {
        if (!data_fetch_batch) {
          data_fetch_batch = Batch::New();
        }
        successor_batch = data_fetch_batch;
      }
    }
    const auto& executor = driver().executor();
    std::move(minishard_index_read_future)
        .ExecuteWhenReady(WithExecutor(
            executor,
            [self = internal::IntrusivePtr<ReadOperationState>(this), requests,
             minishard_index_cache_entry =
                 std::move(minishard_index_cache_entry),
             successor_batch = std::move(successor_batch)](
                ReadyFuture<const void> future) mutable {
              const auto& status = future.status();
              if (!status.ok()) {
                internal_kvstore_batch::SetCommonResult<Request>(requests,
                                                                 {status});
                return;
              }
              OnMinishardIndexReady(std::move(self), requests,
                                    std::move(successor_batch),
                                    std::move(minishard_index_cache_entry));
            }));
  }
  // With the minishard index available, locates each requested chunk's byte
  // range and issues the base-kvstore reads (per request for raw encoding;
  // one shared read per chunk otherwise).
  static void OnMinishardIndexReady(
      internal::IntrusivePtr<ReadOperationState> self, span<Request> requests,
      Batch successor_batch,
      internal::PinnedCacheEntry<MinishardIndexCache>
          minishard_index_cache_entry) {
    std::shared_ptr<const std::vector<MinishardIndexEntry>> minishard_index;
    TimestampedStorageGeneration stamp;
    {
      auto lock = internal::AsyncCache::ReadLock<MinishardIndexCache::ReadData>(
          *minishard_index_cache_entry);
      stamp = lock.stamp();
      minishard_index = lock.shared_data();
    }
    assert(!StorageGeneration::IsUnknown(stamp.generation));
    if (!minishard_index) {
      // Shard (or minishard) absent: every request is missing.
      internal_kvstore_batch::SetCommonResult(
          requests, kvstore::ReadResult::Missing(std::move(stamp)));
      return;
    }
    const auto& sharding_spec = self->driver().sharding_spec();
    const auto process_chunk = [&](ChunkId chunk_id,
                                   span<Request> chunk_requests) {
      auto byte_range = FindChunkInMinishard(*minishard_index, chunk_id);
      if (!byte_range) {
        internal_kvstore_batch::SetCommonResult(
            chunk_requests, kvstore::ReadResult::Missing(stamp));
        return;
      }
      int64_t size = byte_range->size();
      // Drop requests whose generation conditions fail against `stamp`.
      chunk_requests = chunk_requests.first(
          std::remove_if(
              chunk_requests.begin(), chunk_requests.end(),
              [&](Request& request) {
                return !internal_kvstore_batch::ValidateRequestGeneration(
                    request, stamp);
              }) -
          chunk_requests.begin());
      if (sharding_spec.data_encoding == ShardingSpec::DataEncoding::raw) {
        // Raw encoding: each request's byte range can be translated directly
        // into a byte range of the shard file, so issue one read per request.
        const auto process_request = [&](Request& request) {
          auto& byte_range_request =
              std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
          TENSORSTORE_ASSIGN_OR_RETURN(
              auto sub_byte_range, byte_range_request.byte_range.Validate(size),
              static_cast<void>(byte_range_request.promise.SetResult(_)));
          kvstore::ReadOptions kvstore_read_options;
          // Guard against the shard changing since the index was read.
          kvstore_read_options.generation_conditions.if_equal =
              stamp.generation;
          kvstore_read_options.staleness_bound =
              self->request_batch.staleness_bound;
          kvstore_read_options.byte_range = ByteRange{
              byte_range->inclusive_min + sub_byte_range.inclusive_min,
              byte_range->inclusive_min + sub_byte_range.exclusive_max};
          kvstore_read_options.batch = successor_batch;
          auto value_read_future = self->driver().base_kvstore_driver()->Read(
              self->ShardKey(), std::move(kvstore_read_options));
          value_read_future.Force();
          std::move(value_read_future)
              .ExecuteWhenReady([self,
                                 &request](ReadyFuture<kvstore::ReadResult>
                                               future) mutable {
                TENSORSTORE_ASSIGN_OR_RETURN(
                    auto&& read_result, std::move(future.result()),
                    static_cast<void>(
                        std::get<internal_kvstore_batch::ByteRangeReadRequest>(
                            request)
                            .promise.SetResult(_)));
                self->OnRawValueReady(request, std::move(read_result));
              });
        };
        for (auto& request : chunk_requests) {
          process_request(request);
        }
      } else {
        // Compressed encoding: the full encoded chunk must be fetched once
        // and decoded, then each request's byte range is applied.
        kvstore::ReadOptions kvstore_read_options;
        kvstore_read_options.generation_conditions.if_equal = stamp.generation;
        kvstore_read_options.staleness_bound =
            self->request_batch.staleness_bound;
        kvstore_read_options.byte_range = *byte_range;
        kvstore_read_options.batch = successor_batch;
        auto value_read_future = self->driver().base_kvstore_driver()->Read(
            self->ShardKey(), std::move(kvstore_read_options));
        value_read_future.Force();
        std::move(value_read_future)
            .ExecuteWhenReady(
                [self, chunk_requests](
                    ReadyFuture<kvstore::ReadResult> future) mutable {
                  const auto& executor = self->driver().executor();
                  executor([self = std::move(self), chunk_requests,
                            future = std::move(future)] {
                    TENSORSTORE_ASSIGN_OR_RETURN(
                        auto&& read_result, std::move(future.result()),
                        internal_kvstore_batch::SetCommonResult(chunk_requests,
                                                                _));
                    self->OnEncodedValueReady(chunk_requests,
                                              std::move(read_result));
                  });
                });
      }
    };
    // Group consecutive (sorted) requests by chunk id.
    for (size_t request_i = 0; request_i < requests.size();) {
      ChunkId chunk_id =
          std::get<MinishardAndChunkId>(requests[request_i]).chunk_id;
      size_t end_request_i;
      for (end_request_i = request_i + 1; end_request_i < requests.size();
           ++end_request_i) {
        if (std::get<MinishardAndChunkId>(requests[end_request_i]).chunk_id !=
            chunk_id)
          break;
      }
      process_chunk(chunk_id,
                    requests.subspan(request_i, end_request_i - request_i));
      request_i = end_request_i;
    }
  }
  // Completion for a raw-encoding read: aborted (generation changed) requests
  // are retried in `retry_batch_`; otherwise the result is delivered as-is.
  void OnRawValueReady(Request& request, kvstore::ReadResult&& read_result) {
    if (read_result.aborted()) {
      MakeRequest<ReadOperationState>(
          driver(), std::get<ShardIndex>(batch_entry_key), retry_batch_,
          read_result.stamp.time, std::move(request));
      return;
    }
    std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
        .promise.SetResult(std::move(read_result));
  }
  // Completion for a shared encoded-chunk read: retries aborted requests,
  // propagates missing, otherwise decodes once and answers each request's
  // byte range from the decoded value.
  void OnEncodedValueReady(span<Request> chunk_requests,
                           kvstore::ReadResult&& read_result) {
    if (read_result.aborted()) {
      for (auto& request : chunk_requests) {
        MakeRequest<ReadOperationState>(
            driver(), std::get<ShardIndex>(batch_entry_key), retry_batch_,
            read_result.stamp.time, std::move(request));
      }
      return;
    }
    if (!read_result.has_value()) {
      internal_kvstore_batch::SetCommonResult(chunk_requests,
                                              std::move(read_result));
      return;
    }
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto decoded_value,
        DecodeData(read_result.value, driver().sharding_spec().data_encoding),
        internal_kvstore_batch::SetCommonResult(
            chunk_requests,
            internal::ConvertInvalidArgumentToFailedPrecondition(_)));
    const auto process_request =
        [&](Request& request) -> Result<kvstore::ReadResult> {
      auto& byte_range_request =
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto byte_range,
          byte_range_request.byte_range.Validate(decoded_value.size()));
      return kvstore::ReadResult::Value(
          internal::GetSubCord(decoded_value, byte_range), read_result.stamp);
    };
    for (auto& request : chunk_requests) {
      std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
          .promise.SetResult(process_request(request));
    }
  }
};
}
// Reads a single chunk: maps the 8-byte key to (shard, minishard, chunk id)
// and enqueues the request into the per-shard ReadOperationState batch.
Future<kvstore::ReadResult> ShardedKeyValueStore::Read(Key key,
                                                       ReadOptions options) {
  TENSORSTORE_ASSIGN_OR_RETURN(ChunkId chunk_id, KeyToChunkIdOrError(key));
  const auto& sharding_spec = this->sharding_spec();
  auto shard_info = GetChunkShardInfo(sharding_spec, chunk_id);
  auto split_shard_info = GetSplitShardInfo(sharding_spec, shard_info);
  auto [promise, future] = PromiseFuturePair<kvstore::ReadResult>::Make();
  ReadOperationState::MakeRequest<ReadOperationState>(
      *this, split_shard_info.shard, options.batch, options.staleness_bound,
      ReadOperationState::Request{
          {std::move(promise), options.byte_range},
          MinishardAndChunkId{split_shard_info.minishard, chunk_id},
          std::move(options.generation_conditions)});
  return std::move(future);
}
}
namespace garbage_collection {
// Garbage-collection support: the sharded driver only references the base
// kvstore driver, so visiting it suffices.
template <>
struct GarbageCollection<neuroglancer_uint64_sharded::ShardedKeyValueStore> {
  static void Visit(
      GarbageCollectionVisitor& visitor,
      const neuroglancer_uint64_sharded::ShardedKeyValueStore& value) {
    garbage_collection::GarbageCollectionVisit(visitor,
                                               *value.base_kvstore_driver());
  }
};
}
namespace neuroglancer_uint64_sharded {
// Populates `spec` so the driver can be round-tripped through its JSON
// representation.  Fails with InternalError when the driver was created
// directly (via `GetShardedKeyValueStore`) rather than opened from a spec,
// because the context resources are then unavailable.
absl::Status ShardedKeyValueStore::GetBoundSpecData(
    ShardedKeyValueStoreSpecData& spec) const {
  TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver,
                               base_kvstore_driver()->GetBoundSpec());
  spec.base.path = key_prefix();
  if (!data_copy_concurrency_resource_.has_resource() ||
      !cache_pool_resource_.has_resource()) {
    return absl::InternalError("JSON representation not supported");
  }
  spec.data_copy_concurrency = data_copy_concurrency_resource_;
  spec.cache_pool = cache_pool_resource_;
  spec.metadata = sharding_spec();
  // Idiomatic success value (equivalent to the default-constructed status).
  return absl::OkStatus();
}
// Opens the base kvstore, then wraps it in a ShardedKeyValueStore; the
// context resources are recorded on the driver so that GetBoundSpecData can
// reconstruct the spec later.
Future<kvstore::DriverPtr> ShardedKeyValueStoreSpec::DoOpen() const {
  return MapFutureValue(
      InlineExecutor{},
      [spec = internal::IntrusivePtr<const ShardedKeyValueStoreSpec>(this)](
          kvstore::KvStore& base_kvstore) -> Result<kvstore::DriverPtr> {
        auto driver = internal::MakeIntrusivePtr<ShardedKeyValueStore>(
            std::move(base_kvstore.driver),
            spec->data_.data_copy_concurrency->executor,
            std::move(base_kvstore.path), spec->data_.metadata,
            *spec->data_.cache_pool);
        driver->data_copy_concurrency_resource_ =
            spec->data_.data_copy_concurrency;
        driver->cache_pool_resource_ = spec->data_.cache_pool;
        return driver;
      },
      kvstore::Open(data_.base));
}
// Constructs a sharded kvstore adapter directly (without a spec).  Drivers
// created this way cannot be converted back to a JSON spec.
kvstore::DriverPtr GetShardedKeyValueStore(
    kvstore::DriverPtr base_kvstore, Executor executor, std::string key_prefix,
    const ShardingSpec& sharding_spec, internal::CachePool::WeakPtr cache_pool,
    GetMaxChunksPerShardFunction get_max_chunks_per_shard) {
  auto* driver = new ShardedKeyValueStore(
      std::move(base_kvstore), std::move(executor), std::move(key_prefix),
      sharding_spec, std::move(cache_pool),
      std::move(get_max_chunks_per_shard));
  return kvstore::DriverPtr(driver);
}
// Encodes a chunk id as its 8-byte big-endian key representation.
std::string ChunkIdToKey(ChunkId chunk_id) {
  std::string key(sizeof(uint64_t), '\0');
  absl::big_endian::Store64(key.data(), chunk_id.value);
  return key;
}
// Decodes an 8-byte big-endian key back into a chunk id; any other key
// length yields `std::nullopt`.
std::optional<ChunkId> KeyToChunkId(std::string_view key) {
  if (key.size() != sizeof(uint64_t)) return std::nullopt;
  return ChunkId{absl::big_endian::Load64(key.data())};
}
}
}
namespace {
// Registers the "neuroglancer_uint64_sharded" kvstore driver with the global
// driver registry at static-initialization time.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::neuroglancer_uint64_sharded::ShardedKeyValueStoreSpec>
    registration;
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace zlib = ::tensorstore::zlib;
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Future;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultAborted;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::neuroglancer_uint64_sharded::ChunkIdToKey;
using ::tensorstore::neuroglancer_uint64_sharded::GetShardedKeyValueStore;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
// Small cache-pool byte limit shared by most tests below.
constexpr CachePool::Limits kSmallCacheLimits{10000000};
// Builds an absl::Cord from a literal list of byte values.
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
  std::string bytes(x.begin(), x.end());
  return absl::Cord(std::move(bytes));
}
// Convenience wrapper: encodes `chunk_id` as its 8-byte big-endian key.
std::string GetChunkKey(uint64_t chunk_id) { return ChunkIdToKey({chunk_id}); }
// Maps arbitrary string test keys to stable uint64 chunk keys, either
// sequentially or randomly (without collisions).  The same input key always
// maps to the same chunk key within one instance.
class GetUint64Key {
 public:
  GetUint64Key(bool sequential) : sequential_(sequential) {}
  std::string operator()(std::string key) const {
    auto it = key_to_uint64_.find(key);
    if (it == key_to_uint64_.end()) {
      // Draw ids until one is unused (random mode may collide).
      while (true) {
        auto x = sequential_ ? next_chunk_id_++ : absl::Uniform<uint64_t>(gen_);
        if (uint64_to_key_.emplace(x, key).second) {
          it = key_to_uint64_.emplace(key, x).first;
          break;
        }
      }
    }
    return GetChunkKey(it->second);
  }

 private:
  bool sequential_;
  // Mutable so operator() can stay const while memoizing.
  mutable uint64_t next_chunk_id_ = 0;
  mutable absl::BitGen gen_;
  mutable absl::flat_hash_map<std::string, uint64_t> key_to_uint64_;
  mutable absl::flat_hash_map<uint64_t, std::string> uint64_to_key_;
};
// Returns the executor named by `executor_name`: "inline" yields an inline
// executor; anything else yields a 2-thread detached pool.
tensorstore::Executor GetExecutor(std::string_view executor_name) {
  if (executor_name != "inline") {
    return tensorstore::internal::DetachedThreadPool(2);
  }
  return tensorstore::InlineExecutor{};
}
// Knobs for TestReadWriteOps: executor type, key-assignment order, and the
// sharding-spec parameters under test.
struct BasicFunctionalityTestOptions {
  std::string_view executor_name = "thread_pool";
  bool sequential_ids = false;
  std::string_view hash = "identity";
  std::string_view data_encoding = "raw";
  std::string_view minishard_index_encoding = "raw";
  // When true, uses 0 preshift/minishard/shard bits (single shard).
  bool all_zero_bits = false;
};
// Runs the generic kvstore read/write conformance suite against a sharded
// store configured from `options`, backed by an in-memory kvstore.
void TestReadWriteOps(BasicFunctionalityTestOptions options) {
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", options.hash},
      {"preshift_bits", options.all_zero_bits ? 0 : 1},
      {"minishard_bits", options.all_zero_bits ? 0 : 2},
      {"shard_bits", options.all_zero_bits ? 0 : 3},
      {"data_encoding", options.data_encoding},
      {"minishard_index_encoding", options.minishard_index_encoding}};
  auto cache_pool = CachePool::Make(kSmallCacheLimits);
  auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
  auto sharding_spec = ShardingSpec::FromJson(sharding_spec_json).value();
  SCOPED_TRACE(options.executor_name);
  SCOPED_TRACE(sharding_spec_json.dump());
  auto store = GetShardedKeyValueStore(
      base_kv_store, GetExecutor(options.executor_name), "prefix",
      sharding_spec, CachePool::WeakPtr(cache_pool));
  GetUint64Key get_key_fn(options.sequential_ids);
  tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
}
// Exercises the conformance suite across several configurations: default,
// sequential ids, murmur hash, gzip data/minishard-index encodings, the
// single-shard (all-zero-bits) layout, and the inline executor.
TEST(Uint64ShardedKeyValueStoreTest, BasicFunctionality) {
  {
    BasicFunctionalityTestOptions defaults;
    TestReadWriteOps(defaults);
    defaults.sequential_ids = true;
    TestReadWriteOps(defaults);
  }
  {
    BasicFunctionalityTestOptions murmur_options;
    murmur_options.hash = "murmurhash3_x86_128";
    TestReadWriteOps(murmur_options);
  }
  {
    BasicFunctionalityTestOptions gzip_data_options;
    gzip_data_options.data_encoding = "gzip";
    TestReadWriteOps(gzip_data_options);
  }
  {
    BasicFunctionalityTestOptions gzip_index_options;
    gzip_index_options.minishard_index_encoding = "gzip";
    TestReadWriteOps(gzip_index_options);
  }
  {
    BasicFunctionalityTestOptions single_shard_options;
    single_shard_options.all_zero_bits = true;
    TestReadWriteOps(single_shard_options);
  }
  {
    BasicFunctionalityTestOptions inline_options;
    inline_options.executor_name = "inline";
    TestReadWriteOps(inline_options);
  }
}
// Verifies DescribeKey's human-readable output for a 1-bit-minishard /
// 1-bit-shard layout: chunk ids 0-3 map to the expected (minishard, shard).
TEST(Uint64ShardedKeyValueStoreTest, DescribeKey) {
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 1},
      {"shard_bits", 1},
      {"data_encoding", "raw"},
      {"minishard_index_encoding", "raw"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
  kvstore::DriverPtr store = GetShardedKeyValueStore(
      base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
      CachePool::WeakPtr(cache_pool));
  for (const auto& [key, description] :
       std::vector<std::pair<uint64_t, std::string>>{
           {0, "chunk 0 in minishard 0 in \"prefix/0.shard\""},
           {1, "chunk 1 in minishard 1 in \"prefix/0.shard\""},
           {2, "chunk 2 in minishard 0 in \"prefix/1.shard\""},
           {3, "chunk 3 in minishard 1 in \"prefix/1.shard\""},
       }) {
    EXPECT_EQ(description, store->DescribeKey(GetChunkKey(key)));
  }
}
// Fixture providing a sharded store with a single shard and single minishard
// (all bit counts zero) using raw (uncompressed) encodings, layered over an
// in-memory base store.
class RawEncodingTest : public ::testing::Test {
 protected:
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 0},
      {"shard_bits", 0},
      {"data_encoding", "raw"},
      {"minishard_index_encoding", "raw"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  // Underlying store holding the shard files; tests inspect/corrupt it
  // directly.
  kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
  // The sharded store under test.
  kvstore::DriverPtr store = GetShardedKeyValueStore(
      base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
      CachePool::WeakPtr(cache_pool));
};
// Issues three unconditional writes to the same key within one transaction.
// Exactly one write survives (its generation matches the committed shard);
// the other two are superseded and report StorageGeneration::Invalid().
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
  std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
                                 absl::Cord("efgh")};
  std::vector<Future<TimestampedStorageGeneration>> futures;
  auto key = GetChunkKey(10);
  tensorstore::Transaction txn(tensorstore::isolated);
  // Bind by const reference: iterating by value would copy each Cord
  // (clang-tidy: performance-for-range-copy).
  for (const auto& value : values) {
    futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
  }
  txn.CommitAsync().IgnoreFuture();
  std::vector<Result<TimestampedStorageGeneration>> results;
  for (const auto& future : futures) {
    results.push_back(future.result());
  }
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto shard_read, base_kv_store->Read("prefix/0.shard").result());
  // Two writes are superseded (Invalid); one produced the committed shard.
  EXPECT_THAT(
      results,
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
  // The surviving write's value must be what a subsequent read returns.
  for (size_t i = 0; i < results.size(); ++i) {
    if (results[i] && results[i]->generation == shard_read.stamp.generation) {
      EXPECT_THAT(store->Read(key).result(),
                  MatchesKvsReadResult(values[i], results[i]->generation));
    }
  }
}
// Writes several chunks and verifies that listing the store returns exactly
// the written key/value pairs.
TEST_F(RawEncodingTest, List) {
  std::map<std::string, absl::Cord> values{
      {GetChunkKey(1), absl::Cord("a")},
      {GetChunkKey(2), absl::Cord("bc")},
      {GetChunkKey(3), absl::Cord("def")},
      {GetChunkKey(10), absl::Cord("xyz")}};
  // Bind by const reference: a by-value structured binding copies each
  // map pair per iteration (clang-tidy: performance-for-range-copy).
  for (const auto& [key, value] : values) {
    TENSORSTORE_EXPECT_OK(store->Write(key, value));
  }
  EXPECT_THAT(tensorstore::internal::GetMap(store),
              ::testing::Optional(::testing::ElementsAreArray(values)));
}
// Exercises conditional writes and deletes in a single transaction against
// pre-existing chunks, checking which conditions succeed/fail and the final
// store contents.
TEST_F(RawEncodingTest, WritesAndDeletes) {
  StorageGeneration gen1, gen2, gen3;
  {
    // Seed chunks 1, 2, 3 in one committed transaction and capture their
    // generations for use as conditions below.
    tensorstore::Transaction txn(tensorstore::isolated);
    auto init_future1 = kvstore::WriteCommitted(
        KvStore{store, txn}, GetChunkKey(1), absl::Cord("a"));
    auto init_future2 = kvstore::WriteCommitted(
        KvStore{store, txn}, GetChunkKey(2), absl::Cord("bc"));
    auto init_future3 = kvstore::WriteCommitted(
        KvStore{store, txn}, GetChunkKey(3), absl::Cord("def"));
    txn.CommitAsync().IgnoreFuture();
    gen1 = init_future1.value().generation;
    gen2 = init_future2.value().generation;
    gen3 = init_future3.value().generation;
  }
  tensorstore::Transaction txn(tensorstore::isolated);
  // Delete conditioned on "no value", but chunk 1 exists -> must fail.
  auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
                                          {StorageGeneration::NoValue()});
  // Two writes to chunk 2 both conditioned on gen2; only one can win.
  auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
                                         absl::Cord("ww"), {gen2});
  auto future3 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
                                         absl::Cord("xx"), {gen2});
  // Write to a not-yet-existing chunk conditioned on "no value" -> succeeds.
  auto future4 =
      kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(4),
                              absl::Cord("zz"), {StorageGeneration::NoValue()});
  // Delete conditioned on the current generation -> succeeds.
  auto future5 =
      kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(3), {gen3});
  txn.CommitAsync().IgnoreFuture();
  // The mismatched delete reports Unknown (condition failed).
  EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
                                    StorageGeneration::Unknown()));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto shard_read, base_kv_store->Read("prefix/0.shard").result());
  // Exactly one of the two conditional writes to chunk 2 succeeds (its
  // generation matches the committed shard); the other fails with Unknown.
  EXPECT_THAT(
      std::vector({future2.result(), future3.result()}),
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
          MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
  EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
              MatchesKvsReadResult(absl::Cord("a")));
  // Chunk 2 holds whichever conditional write won.
  EXPECT_THAT(store->Read(GetChunkKey(2)).result(),
              MatchesKvsReadResult(
                  !StorageGeneration::IsUnknown(future2.result()->generation)
                      ? absl::Cord("ww")
                      : absl::Cord("xx")));
  EXPECT_THAT(store->Read(GetChunkKey(3)).result(),
              MatchesKvsReadResultNotFound());
  EXPECT_THAT(store->Read(GetChunkKey(4)).result(),
              MatchesKvsReadResult(absl::Cord("zz")));
}
// Runs two operations within a transaction in both possible issue orders.
//
// For each ordering: calls `init`, issues `op0` and `op1` in the chosen
// order, calls `finalize` (which is expected to commit), and records the
// two results, always as {result of op0, result of op1}.  Returns one
// result pair per ordering so callers can assert on order-dependent
// outcomes.
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
    std::function<void()> init,
    std::function<Future<TimestampedStorageGeneration>()> op0,
    std::function<Future<TimestampedStorageGeneration>()> op1,
    std::function<void()> finalize) {
  std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
  for (const bool op1_first : {false, true}) {
    init();
    Future<TimestampedStorageGeneration> future0;
    Future<TimestampedStorageGeneration> future1;
    if (op1_first) {
      future1 = op1();
      future0 = op0();
    } else {
      future0 = op0();
      future1 = op1();
    }
    finalize();
    all_results.push_back({future0.result(), future1.result()});
  }
  return all_results;
}
// Round-trips a single chunk: write it, read it back, delete it, then
// verify it is gone.
TEST_F(RawEncodingTest, WriteThenDelete) {
  const auto key = GetChunkKey(1);
  TENSORSTORE_ASSERT_OK(store->Write(key, absl::Cord("a")).result());
  EXPECT_THAT(store->Read(key).result(),
              MatchesKvsReadResult(absl::Cord("a")));
  TENSORSTORE_ASSERT_OK(store->Delete(key).result());
  EXPECT_THAT(store->Read(key).result(), MatchesKvsReadResultNotFound());
}
// Two deletes of an existing chunk in one transaction: one conditioned on
// the current generation, one conditioned on "no value".  The outcome
// depends on which delete is issued first, so both orders are exercised via
// TestOrderDependentWrites.
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
  StorageGeneration gen;
  tensorstore::Transaction txn{tensorstore::no_transaction};
  EXPECT_THAT(
      TestOrderDependentWrites(
          [&] {
            // (Re)create chunk 1 and start a fresh transaction before each
            // ordering.
            gen = store->Write(GetChunkKey(1), absl::Cord("a"))
                      .value()
                      .generation;
            txn = tensorstore::Transaction(tensorstore::isolated);
          },
          [&] {
            // Delete conditioned on the existing generation.
            return kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
                                            {gen});
          },
          [&] {
            // Delete conditioned on the chunk not existing.
            return kvstore::DeleteCommitted(
                KvStore{store, txn}, GetChunkKey(1),
                {StorageGeneration::NoValue()});
          },
          [&] { txn.CommitAsync().IgnoreFuture(); }),
      // Order 1: conditional delete wins (superseded -> Invalid), then the
      // NoValue-conditioned delete sees the deletion and succeeds.
      // Order 2: NoValue-conditioned delete fails first (value exists ->
      // Unknown), then the generation-conditioned delete succeeds.
      ::testing::UnorderedElementsAre(
          ::testing::ElementsAre(
              MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::NoValue())),
          ::testing::ElementsAre(
              MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
              MatchesTimestampedStorageGeneration(
                  StorageGeneration::Unknown()))));
}
// After deleting a chunk, an unconditional write and a write conditioned on
// a generation that cannot match are issued in both orders: the
// unconditional write always succeeds (some real generation) and the
// mismatched conditional write always fails (Unknown), regardless of order.
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
  tensorstore::Transaction txn{tensorstore::no_transaction};
  EXPECT_THAT(
      TestOrderDependentWrites(
          [&] {
            // Ensure chunk 0 is absent and start a fresh transaction.
            store->Delete(GetChunkKey(0)).value();
            txn = tensorstore::Transaction(tensorstore::isolated);
          },
          [&] {
            // Unconditional write.
            return kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0),
                                           absl::Cord("a"));
          },
          [&] {
            // Write conditioned on a generation that can never match.
            return kvstore::WriteCommitted(
                KvStore{store, txn}, GetChunkKey(0), absl::Cord("b"),
                {StorageGeneration::FromString("g")});
          },
          [&] { txn.CommitAsync().IgnoreFuture(); }),
      // Same expected outcome for both orderings.
      ::testing::Each(::testing::ElementsAre(
          MatchesTimestampedStorageGeneration(
              ::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
                               ::testing::Not(StorageGeneration::Invalid()))),
          MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
// Two deletes of the same non-existent chunk, both conditioned on NoValue,
// within one transaction: per the expectation below, one resolves with
// Invalid (superseded) and the other with NoValue (applied).
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
  tensorstore::Transaction txn(tensorstore::isolated);
  auto future0 = kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
                                          {StorageGeneration::NoValue()});
  auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
                                          {StorageGeneration::NoValue()});
  txn.CommitAsync().IgnoreFuture();
  EXPECT_THAT(
      std::vector({future0.result(), future1.result()}),
      ::testing::UnorderedElementsAre(
          MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
          MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
// A shard file shorter than the 16-byte shard index entry must cause both
// reads and writes to fail with FAILED_PRECONDITION.
TEST_F(RawEncodingTest, ShardIndexTooShort) {
  // Only 3 bytes; a shard index entry requires 16.
  base_kv_store->Write("prefix/0.shard", Bytes({1, 2, 3})).value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(
          absl::StatusCode::kFailedPrecondition,
          "Error reading minishard 0 in \"prefix/0\\.shard\": "
          "Error retrieving shard index entry: "
          "Requested byte range \\[0, 16\\) is not valid for value of size 3"));
  EXPECT_THAT(
      store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading \"prefix/0\\.shard\": "
                    "Existing shard has size 3, but expected at least: 16"));
}
// A shard index entry whose start offset exceeds its end offset (here
// [10, 2) as two little-endian uint64s) is rejected on both read and write.
TEST_F(RawEncodingTest, ShardIndexInvalidByteRange) {
  base_kv_store
      ->Write("prefix/0.shard",
              Bytes({10, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0}))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Error retrieving shard index entry: "
                    "Shard index specified invalid byte range: \\[10, 2\\)"));
  EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
              MatchesStatus(
                  absl::StatusCode::kFailedPrecondition,
                  "Error reading \"prefix/0\\.shard\": "
                  "Error decoding existing shard index entry for minishard 0: "
                  "Shard index specified invalid byte range: \\[10, 2\\)"));
}
// A shard index entry whose end offset is near INT64_MAX overflows when
// adjusted relative to the end of the shard index; both read and write must
// fail rather than wrap around.
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
  base_kv_store
      ->Write("prefix/0.shard",
              Bytes({
                  10, 0, 0, 0, 0, 0, 0, 0,
                  // End offset 0x7fffffffffffffff (little endian).
                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
              }))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Error retrieving shard index entry: "
                    "Byte range .* relative to the end of "
                    "the shard index \\(16\\) is not valid"));
  EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
              MatchesStatus(
                  absl::StatusCode::kFailedPrecondition,
                  "Error reading \"prefix/0\\.shard\": "
                  "Error decoding existing shard index entry for minishard 0: "
                  "Byte range .* relative to the end of "
                  "the shard index \\(16\\) is not valid"));
}
// The shard index points at a minishard index range ([0, 48), i.e. bytes
// [16, 64) in the file) that extends past the end of the 16-byte shard
// file; both read and write must fail.
TEST_F(RawEncodingTest, MinishardIndexOutOfRange) {
  base_kv_store
      ->Write("prefix/0.shard",
              Bytes({0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0}))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Requested byte range \\[16, 64\\) is "
                    "not valid for value of size 16"));
  EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
              MatchesStatus(
                  absl::StatusCode::kFailedPrecondition,
                  "Error reading \"prefix/0\\.shard\": "
                  "Error decoding existing shard index entry for minishard 0: "
                  "Requested byte range .* is not valid for value of size 16"));
}
// A minishard index of length 1 is invalid (a raw minishard index must be a
// multiple of the 24-byte entry size — inferred from the asserted error);
// both read and write must fail.
TEST_F(RawEncodingTest, MinishardIndexInvalidSize) {
  // Shard index points to a 1-byte minishard index ([0, 1)); the file has
  // one trailing byte to back it.
  base_kv_store
      ->Write("prefix/0.shard",
              Bytes({0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Invalid minishard index length: 1"));
  EXPECT_THAT(
      store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading \"prefix/0\\.shard\": "
                    "Error decoding existing minishard index for minishard 0: "
                    "Invalid minishard index length: 1"));
}
// A minishard index entry whose byte range overflows when adjusted relative
// to the end of the shard index must fail the read.
TEST_F(RawEncodingTest, MinishardIndexByteRangeOverflow) {
  base_kv_store
      ->Write("prefix/0.shard",
              Bytes({
                  // Shard index: minishard index occupies [0, 24).
                  0, 0, 0, 0, 0, 0, 0, 0,
                  24, 0, 0, 0, 0, 0, 0, 0,
                  // Minishard index entry for chunk 10 with an end offset of
                  // 0x7fffffffffffffff, which overflows when rebased.
                  10, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0,
                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
              }))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Error decoding minishard index entry "
                    "for chunk 10: Byte range .* relative to the end "
                    "of the shard index \\(16\\) is not valid"));
}
// A minishard index entry for chunk 10 whose data byte range (length 200)
// extends past the end of the shard file must make a write to the shard
// fail, since the existing chunk cannot be re-encoded.
TEST_F(RawEncodingTest, MinishardIndexEntryByteRangeOutOfRange) {
  base_kv_store
      ->Write("prefix/0.shard", Bytes({
                                    // Shard index: minishard index at
                                    // [0, 24).
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    24, 0, 0, 0, 0, 0, 0, 0,
                                    // Entry: chunk 10, data range of length
                                    // 200 — beyond the 40-byte file.
                                    10, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    200, 0, 0, 0, 0, 0, 0, 0,
                                }))
      .value();
  EXPECT_THAT(store->Write(GetChunkKey(1), absl::Cord("x")).result(),
              MatchesStatus(
                  absl::StatusCode::kFailedPrecondition,
                  "Error reading \"prefix/0\\.shard\": "
                  "Invalid existing byte range for chunk 10: "
                  "Requested byte range .* is not valid for value of size .*"));
}
// A minishard index containing the same chunk id twice (two 24-byte entries,
// both decoding to chunk 10 given delta encoding of ids — inferred from the
// asserted error) must make a write fail.
TEST_F(RawEncodingTest, MinishardIndexWithDuplicateChunkId) {
  base_kv_store
      ->Write("prefix/0.shard", Bytes({
                                    // Shard index: minishard index at
                                    // [0, 48), i.e. two entries.
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    48, 0, 0, 0, 0, 0, 0, 0,
                                    10, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                }))
      .value();
  EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
              MatchesStatus(absl::StatusCode::kFailedPrecondition,
                            "Error reading \"prefix/0\\.shard\": "
                            "Chunk 10 occurs more than once in the minishard "
                            "index for minishard 0"));
}
// Fixture identical to RawEncodingTest except that both chunk data and the
// minishard index are gzip-compressed.
class GzipEncodingTest : public ::testing::Test {
 protected:
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 0},
      {"shard_bits", 0},
      {"data_encoding", "gzip"},
      {"minishard_index_encoding", "gzip"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  // Underlying store holding the shard files; tests corrupt it directly.
  kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
  // The sharded store under test.
  kvstore::DriverPtr store = GetShardedKeyValueStore(
      base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
      CachePool::WeakPtr(cache_pool));
};
// A minishard index that is not valid gzip data must fail decoding on both
// read and write.
TEST_F(GzipEncodingTest, CorruptMinishardGzipEncoding) {
  base_kv_store
      ->Write("prefix/0.shard", Bytes({
                                    // Shard index: minishard index at
                                    // [0, 3).
                                    0, 0, 0, 0, 0, 0, 0, 0,
                                    3, 0, 0, 0, 0, 0, 0, 0,
                                    // 3 bytes of garbage in place of a gzip
                                    // stream.
                                    1, 2, 3,
                                }))
      .value();
  EXPECT_THAT(
      store->Read(GetChunkKey(10)).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Error decoding zlib-compressed data"));
  EXPECT_THAT(
      store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Error reading \"prefix/0\\.shard\": "
                    "Error decoding existing minishard index for minishard 0: "
                    "Error decoding zlib-compressed data"));
}
// Chunk data that is not valid gzip data must fail decoding on read.  The
// minishard index itself is valid (properly gzip-encoded), but the chunk
// data it points at ("abc") is not compressed.
TEST_F(GzipEncodingTest, CorruptDataGzipEncoding) {
  // "abc" will serve as the (invalid) compressed chunk data at file offset
  // 16, i.e. data range [0, 3) relative to the end of the shard index.
  absl::Cord shard_data("abc");
  zlib::Options zlib_options;
  zlib_options.use_gzip_header = true;
  // Append a gzip-compressed minishard index: one entry for chunk 10 with
  // data range [0, 3).
  zlib::Encode(Bytes({
                   10, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0,
                   3, 0, 0, 0, 0, 0, 0, 0,
               }),
               &shard_data, zlib_options);
  // Total payload size fits in one byte; used as the little-endian end
  // offset of the minishard index in the shard index below.
  const unsigned char n = static_cast<unsigned char>(shard_data.size());
  // Shard index: minishard index occupies [3, n).
  absl::Cord temp = Bytes({
      3, 0, 0, 0, 0, 0, 0, 0,
      n, 0, 0, 0, 0, 0, 0, 0,
  });
  temp.Append(shard_data);
  TENSORSTORE_ASSERT_OK(base_kv_store->Write("prefix/0.shard", temp));
  EXPECT_THAT(store->Read(GetChunkKey(10)).result(),
              MatchesStatus(absl::StatusCode::kFailedPrecondition,
                            "Error decoding zlib-compressed data"));
}
// Fixture that layers the sharded store over a MockKeyValueStore so tests
// can observe and answer each individual read/write request issued to the
// underlying store (1 minishard bit, 1 shard bit -> 16-byte shard index per
// shard).
class UnderlyingKeyValueStoreTest : public ::testing::Test {
 protected:
  ::nlohmann::json sharding_spec_json{
      {"@type", "neuroglancer_uint64_sharded_v1"},
      {"hash", "identity"},
      {"preshift_bits", 0},
      {"minishard_bits", 1},
      {"shard_bits", 1},
      {"data_encoding", "raw"},
      {"minishard_index_encoding", "raw"}};
  ShardingSpec sharding_spec =
      ShardingSpec::FromJson(sharding_spec_json).value();
  CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
  // Mock underlying store; tests pop queued requests and set their results.
  MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
  // Constructs the sharded store, optionally with a max-chunks-per-shard
  // function (used by write tests to enable whole-shard optimizations).
  kvstore::DriverPtr GetStore(
      tensorstore::neuroglancer_uint64_sharded::GetMaxChunksPerShardFunction
          get_max_chunks_per_shard = {}) {
    return GetShardedKeyValueStore(
        mock_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
        CachePool::WeakPtr(cache_pool), std::move(get_max_chunks_per_shard));
  }
  kvstore::DriverPtr store = GetStore();
};
// End-to-end read flow against the mock underlying store: a chunk read
// issues (1) a shard index entry read, (2) a minishard index read, and
// (3) the chunk data read, each conditioned on the generation observed so
// far.  Also exercises minishard-index caching, staleness bounds, and
// re-reading after a concurrent generation change.
TEST_F(UnderlyingKeyValueStoreTest, Read) {
  absl::Time init_time = UniqueNow();
  absl::Time minishard_index_time;
  {
    auto future = store->Read(GetChunkKey(0x50), {});
    {
      // Request 1: shard index entry for minishard 0 (bytes [0, 16)).
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
      // Respond: minishard index occupies [5, 31) relative to the end of
      // the 32-byte shard index, i.e. bytes [37, 63) of the file.
      req.promise.SetResult(ReadResult::Value(
          Bytes({
              5, 0, 0, 0, 0, 0, 0, 0,
              31, 0, 0, 0, 0, 0, 0, 0,
          }),
          {StorageGeneration::FromString("g0"), absl::Now()}));
    }
    {
      // Request 2: minishard index, conditional on the generation "g0" seen
      // in request 1.
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(37, 63), req.options.byte_range);
      minishard_index_time = absl::Now();
      // Respond: one entry — chunk 0x50 with data range [0, 5), i.e. file
      // bytes [32, 37).
      req.promise.SetResult(ReadResult::Value(
          Bytes({
              0x50, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0,
              5, 0, 0, 0, 0, 0, 0, 0,
          }),
          {StorageGeneration::FromString("g0"), minishard_index_time}));
    }
    absl::Time read_time;
    {
      // Request 3: the chunk data itself, still conditional on "g0".
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult::Value(Bytes({5, 6, 7, 8, 9}),
                            {StorageGeneration::FromString("g0"), read_time}));
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g0"), read_time));
  }
  {
    // A missing chunk within the cached staleness bound is answered from
    // the cached minishard index with no underlying requests.
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(GetChunkKey(0x60), options);
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(future.result(),
                MatchesKvsReadResultNotFound(minishard_index_time));
  }
  {
    // A fresher staleness bound forces a revalidation of the shard index
    // (if_not_equal == "g0"); an Unspecified (not-modified) response keeps
    // the cached index and the chunk remains not-found.
    auto req_time = UniqueNow();
    auto future = store->Read(GetChunkKey(0x60), {});
    {
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
      minishard_index_time = absl::Now();
      req.promise.SetResult(ReadResult::Unspecified(
          {StorageGeneration::FromString("g0"), minishard_index_time}));
    }
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(future.result(),
                MatchesKvsReadResultNotFound(minishard_index_time));
  }
  {
    // With the minishard index cached, reading an existing chunk issues
    // only the data read (conditional on "g0").
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(GetChunkKey(0x50), options);
    absl::Time read_time;
    {
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
      EXPECT_EQ(init_time, req.options.staleness_bound);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult::Value(Bytes({5, 6, 7, 8, 9}),
                            {StorageGeneration::FromString("g0"), read_time}));
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g0"), read_time));
  }
  {
    // If the conditional data read is aborted (generation no longer "g0"
    // would be simulated by a later change; here an Unspecified response),
    // the store re-reads the shard index, sees a new generation "g1", and
    // retries the full chain against it.
    kvstore::ReadOptions options;
    options.staleness_bound = init_time;
    auto future = store->Read(GetChunkKey(0x50), options);
    absl::Time abort_time;
    {
      // Data read attempt under the stale cached generation "g0".
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(init_time, req.options.staleness_bound);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
      abort_time = absl::Now();
      req.promise.SetResult(ReadResult::Unspecified(
          {StorageGeneration::FromString("g0"), abort_time}));
    }
    {
      // Shard index re-read; respond with new generation "g1" and a
      // minishard index at [6, 32) relative to the end of the shard index,
      // i.e. file bytes [38, 64).
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::FromString("g0"),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
      EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
      req.promise.SetResult(ReadResult::Value(
          Bytes({
              6, 0, 0, 0, 0, 0, 0, 0,
              32, 0, 0, 0, 0, 0, 0, 0,
          }),
          {StorageGeneration::FromString("g1"), absl::Now()}));
    }
    {
      // Minishard index re-read under "g1": chunk 0x50 now has data range
      // [0, 6), i.e. file bytes [32, 38).
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g1"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
      minishard_index_time = absl::Now();
      req.promise.SetResult(ReadResult::Value(
          Bytes({
              0x50, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0,
              6, 0, 0, 0, 0, 0, 0, 0,
          }),
          {StorageGeneration::FromString("g1"), minishard_index_time}));
    }
    absl::Time read_time;
    {
      // Data read under "g1" succeeds.
      auto req = mock_store->read_requests.pop_nonblock().value();
      ASSERT_EQ(0, mock_store->read_requests.size());
      EXPECT_EQ("prefix/0.shard", req.key);
      EXPECT_EQ(StorageGeneration::Unknown(),
                req.options.generation_conditions.if_not_equal);
      EXPECT_EQ(StorageGeneration::FromString("g1"),
                req.options.generation_conditions.if_equal);
      EXPECT_EQ(OptionalByteRangeRequest(32, 38), req.options.byte_range);
      read_time = absl::Now();
      req.promise.SetResult(
          ReadResult::Value(Bytes({4, 5, 6, 7, 8, 9}),
                            {StorageGeneration::FromString("g1"), read_time}));
    }
    ASSERT_EQ(0, mock_store->read_requests.size());
    ASSERT_TRUE(future.ready());
    EXPECT_THAT(
        future.result(),
        MatchesKvsReadResult(Bytes({4, 5, 6, 7, 8, 9}),
                             StorageGeneration::FromString("g1"), read_time));
  }
}
// Reads a missing chunk within a transaction — serving the underlying read
// from an empty in-memory store — then verifies the transaction commits
// cleanly with nothing to write.
TEST_F(UnderlyingKeyValueStoreTest, TransactionReadThenCommit) {
  tensorstore::Transaction txn(tensorstore::isolated);
  auto backing = tensorstore::GetMemoryKeyValueStore();
  auto read_future =
      kvstore::Read(KvStore{store, txn}, GetChunkKey(0x50), {});
  // Forward the single underlying read request to the empty backing store.
  mock_store->read_requests.pop()(backing);
  ASSERT_EQ(0, mock_store->read_requests.size());
  EXPECT_THAT(read_future.result(),
              ::testing::Optional(MatchesKvsReadResultNotFound()));
  TENSORSTORE_ASSERT_OK(txn.CommitAsync().result());
}
// If the minishard-index read is aborted because the shard generation
// changed after the shard index was read, the store must restart the chain:
// re-read the shard index, then the minishard index, then the data, all
// under the new generation.
TEST_F(UnderlyingKeyValueStoreTest,
       ReadConcurrentModificationAfterReadingShardIndex) {
  absl::Time init_time = absl::Now();
  kvstore::ReadOptions options;
  options.staleness_bound = init_time;
  // Chunk 0x1 -> minishard 1, so the shard index entry is bytes [16, 32).
  auto future = store->Read(GetChunkKey(0x1), options);
  absl::Time abort_time;
  {
    // Shard index read; respond with generation "g2" and a minishard index
    // at [6, 32) relative to the end of the shard index (file bytes
    // [38, 64)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(init_time, req.options.staleness_bound);
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              6, 0, 0, 0, 0, 0, 0, 0,
                              32, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g2"), absl::Now()}));
  }
  {
    // Minishard index read conditioned on "g2"; abort it (Unspecified) to
    // simulate a concurrent modification.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g2"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
    abort_time = absl::Now();
    req.promise.SetResult(ReadResult::Unspecified(
        {StorageGeneration::FromString("g2"), abort_time}));
  }
  {
    // Shard index re-read; new generation "g3" with minishard index at
    // [7, 33) (file bytes [39, 65)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              7, 0, 0, 0, 0, 0, 0, 0,
                              33, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g3"), absl::Now()}));
  }
  {
    // Minishard index under "g3": chunk 0x1 with data range [0, 4) (file
    // bytes [32, 36)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g3"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(39, 65), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              0x1, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              4, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g3"), absl::Now()}));
  }
  absl::Time read_time;
  {
    // Data read under "g3" succeeds.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g3"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
    read_time = absl::Now();
    req.promise.SetResult(ReadResult::Value(
        Bytes({4, 5, 6, 7}), {StorageGeneration::FromString("g3"), read_time}));
  }
  ASSERT_EQ(0, mock_store->read_requests.size());
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(
      future.result(),
      MatchesKvsReadResult(Bytes({4, 5, 6, 7}),
                           StorageGeneration::FromString("g3"), read_time));
}
// If the shard is deleted between reading the shard index and reading the
// minishard index, the Missing response resolves the read as not-found.
TEST_F(UnderlyingKeyValueStoreTest,
       ReadConcurrentDeleteAfterReadingShardIndex) {
  auto req_time = UniqueNow();
  auto future = store->Read(GetChunkKey(0x1), {});
  {
    // Shard index read (chunk 0x1 -> minishard 1 -> entry bytes [16, 32)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              6, 0, 0, 0, 0, 0, 0, 0,
                              32, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g4"), absl::Now()}));
  }
  absl::Time read_time;
  {
    // Minishard index read conditioned on "g4"; the shard has meanwhile
    // been deleted, so respond with Missing.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g4"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
    read_time = absl::Now();
    req.promise.SetResult(ReadResult::Missing(read_time));
  }
  ASSERT_EQ(0, mock_store->read_requests.size());
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
// If the shard is deleted between reading the minishard index and reading
// the chunk data, the Missing response resolves the read as not-found.
TEST_F(UnderlyingKeyValueStoreTest,
       ReadConcurrentDeleteAfterReadingMinishardIndex) {
  auto req_time = UniqueNow();
  auto future = store->Read(GetChunkKey(0x1), {});
  {
    // Shard index read (chunk 0x1 -> minishard 1 -> entry bytes [16, 32)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_equal);
    EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              6, 0, 0, 0, 0, 0, 0, 0,
                              32, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g0"), absl::Now()}));
  }
  {
    // Minishard index read: one entry, chunk 0x1 with data range [0, 4)
    // (file bytes [32, 36)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g0"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              0x1, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              4, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g0"), absl::Now()}));
  }
  absl::Time read_time;
  {
    // Data read conditioned on "g0"; the shard has meanwhile been deleted,
    // so respond with Missing.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(StorageGeneration::Unknown(),
              req.options.generation_conditions.if_not_equal);
    EXPECT_EQ(StorageGeneration::FromString("g0"),
              req.options.generation_conditions.if_equal);
    EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
    read_time = absl::Now();
    req.promise.SetResult(ReadResult::Missing(read_time));
  }
  ASSERT_EQ(0, mock_store->read_requests.size());
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
// An error from the underlying store while reading the shard index
// propagates to the chunk read, wrapped with context.
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingShardIndex) {
  auto future = store->Read(GetChunkKey(0x50), {});
  {
    // Fail the shard index entry read (bytes [0, 16) for minishard 0).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(
      future.result(),
      MatchesStatus(absl::StatusCode::kUnknown,
                    "Error reading minishard 0 in \"prefix/0\\.shard\": "
                    "Error retrieving shard index entry: "
                    "Read error"));
}
// An error from the underlying store while reading the minishard index
// propagates to the chunk read, wrapped with context.
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingMinishardShardIndex) {
  auto future = store->Read(GetChunkKey(0x1), {});
  {
    // Shard index read succeeds (minishard index at file bytes [38, 64)).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              6, 0, 0, 0, 0, 0, 0, 0,
                              32, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g0"), absl::Now()}));
  }
  {
    // Fail the minishard index read.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(
      future.result(),
      MatchesStatus(absl::StatusCode::kUnknown,
                    "Error reading minishard 1 in \"prefix/0\\.shard\": "
                    "Read error"));
}
// An error from the underlying store while reading the chunk data itself
// propagates to the chunk read unchanged.
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingData) {
  auto future = store->Read(GetChunkKey(0x1), {});
  {
    // Shard index read succeeds.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              6, 0, 0, 0, 0, 0, 0, 0,
                              32, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g0"), absl::Now()}));
  }
  {
    // Minishard index read succeeds: chunk 0x1 has data at file bytes
    // [32, 36).
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
    req.promise.SetResult(
        ReadResult::Value(Bytes({
                              0x1, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0, 0, 0, 0,
                              4, 0, 0, 0, 0, 0, 0, 0,
                          }),
                          {StorageGeneration::FromString("g0"), absl::Now()}));
  }
  {
    // Fail the data read.
    auto req = mock_store->read_requests.pop_nonblock().value();
    ASSERT_EQ(0, mock_store->read_requests.size());
    EXPECT_EQ("prefix/0.shard", req.key);
    EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
    req.promise.SetResult(absl::UnknownError("Read error"));
  }
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(),
              MatchesStatus(absl::StatusCode::kUnknown, "Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadInvalidKey) {
auto future = store->Read("abc", {});
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteInvalidKey) {
auto future = store->Write("abc", absl::Cord("x"));
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteInvalidKey) {
auto future = store->Delete("abc");
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShard) {
for (const bool with_max_chunks : {false, true}) {
SCOPED_TRACE(tensorstore::StrCat("with_max_chunks=", with_max_chunks));
if (with_max_chunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 2;
});
} else {
store = GetStore();
}
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, UnconditionalWrite) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 2;
});
auto txn = Transaction(tensorstore::isolated);
auto future1 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0x50),
Bytes({1, 2, 3}));
auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0x54),
Bytes({4, 5, 6}));
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_EQ(0, mock_store->write_requests.size());
txn.CommitAsync().IgnoreFuture();
ASSERT_EQ(0, mock_store->read_requests.size());
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
54, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
4, 5, 6,
0x50, 0, 0, 0, 0, 0, 0, 0,
0x04, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future1.ready());
ASSERT_TRUE(future2.ready());
EXPECT_THAT(future1.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
EXPECT_THAT(future2.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, ConditionalWriteDespiteMaxChunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 1;
});
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}),
{StorageGeneration::NoValue()});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
}
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShardError) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
req.promise.SetResult(absl::UnknownError("Write error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error writing \"prefix/0\\.shard\": "
"Write error"));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShard) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
req.promise.SetResult(
ReadResult::Value(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 5, 6,
0x70, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
54, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
4, 5, 6,
0x50, 0, 0, 0, 0, 0, 0, 0,
0x20, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g1"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteMaxChunksWithExistingShard) {
for (const bool specify_max_chunks : {false, true}) {
if (specify_max_chunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 1;
});
}
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
if (!specify_max_chunks) {
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ((specify_max_chunks ? StorageGeneration::Unknown()
: StorageGeneration::NoValue()),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShardReadError) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading \"prefix/0\\.shard\": "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteRangeUnimplemented) {
EXPECT_THAT(store->DeleteRange(tensorstore::KeyRange::Prefix("abc")).result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
TEST_F(UnderlyingKeyValueStoreTest, TransactionalDeleteRangeUnimplemented) {
EXPECT_THAT(
store->TransactionalDeleteRange({}, tensorstore::KeyRange::Prefix("abc")),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
TEST_F(UnderlyingKeyValueStoreTest, BatchRead) {
cache_pool = CachePool::Make({});
auto memory_store = tensorstore::GetMemoryKeyValueStore();
mock_store->forward_to = memory_store;
mock_store->log_requests = true;
mock_store->handle_batch_requests = true;
auto store = GetStore(
[](uint64_t shard) -> uint64_t {
return 6;
});
auto key0 = GetChunkKey(0x50);
auto key1 = GetChunkKey(0x54);
auto key2 = GetChunkKey(0x58);
auto key3 = GetChunkKey(0x51);
auto key4 = GetChunkKey(0x55);
auto key5 = GetChunkKey(0x59);
auto key6 = GetChunkKey(0x52);
auto key7 = GetChunkKey(0x56);
auto key8 = GetChunkKey(0x5a);
TENSORSTORE_ASSERT_OK(store->Write(key0, absl::Cord("abc")).result());
TENSORSTORE_ASSERT_OK(store->Write(key1, absl::Cord("def")).result());
TENSORSTORE_ASSERT_OK(store->Write(key3, absl::Cord("key3-")).result());
TENSORSTORE_ASSERT_OK(store->Write(key4, absl::Cord("key4--")).result());
TENSORSTORE_ASSERT_OK(store->Write(key5, absl::Cord("key5---")).result());
TENSORSTORE_ASSERT_OK(store->Write(key6, absl::Cord("key6----")).result());
TENSORSTORE_ASSERT_OK(store->Write(key7, absl::Cord("key6-----")).result());
TENSORSTORE_ASSERT_OK(store->Write(key8, absl::Cord("key6------")).result());
mock_store->request_log.pop_all();
{
SCOPED_TRACE(
"Read 2/6 chunks from the same shard (same minibatch) in a single "
"batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key1, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(3));
}
{
SCOPED_TRACE("Read 6/6 entries from the same shard in a single batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key1, options),
store->Read(key2, options),
store->Read(key3, options),
store->Read(key4, options),
store->Read(key5, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(futures[3].result(), MatchesKvsReadResult(absl::Cord("key3-")));
EXPECT_THAT(futures[4].result(),
MatchesKvsReadResult(absl::Cord("key4--")));
EXPECT_THAT(futures[5].result(),
MatchesKvsReadResult(absl::Cord("key5---")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(1));
}
{
SCOPED_TRACE(
"Read 6/6 entries from the same shard with inconsistent generation "
"constraints");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options1;
options1.batch = Batch::New();
kvstore::ReadOptions options2;
options2.batch = options1.batch;
options2.generation_conditions.if_not_equal =
StorageGeneration::Invalid();
kvstore::ReadOptions options3;
options3.batch = options1.batch;
options3.generation_conditions.if_equal = StorageGeneration::Invalid();
futures = {
store->Read(key0, options1),
store->Read(key1, options1),
store->Read(key2, options2),
store->Read(key3, options1),
store->Read(key4, options3),
store->Read(key5, options1),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(futures[3].result(), MatchesKvsReadResult(absl::Cord("key3-")));
EXPECT_THAT(futures[4].result(), MatchesKvsReadResultAborted());
EXPECT_THAT(futures[5].result(),
MatchesKvsReadResult(absl::Cord("key5---")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(3));
}
{
SCOPED_TRACE("Read 1 entry from each of two shards in a single batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key6, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(),
MatchesKvsReadResult(absl::Cord("key6----")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(6));
}
}
class ReadModifyWriteTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
tensorstore::kvstore::DriverPtr memory_store =
tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr GetStore(
tensorstore::neuroglancer_uint64_sharded::GetMaxChunksPerShardFunction
get_max_chunks_per_shard = {}) {
return GetShardedKeyValueStore(
mock_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(CachePool::Make(CachePool::Limits{})),
std::move(get_max_chunks_per_shard));
}
auto GetKvsBackedCache(kvstore::DriverPtr store = {}) {
if (!store) store = GetStore();
return GetCache<KvsBackedTestCache>(
CachePool::Make(CachePool::Limits{}).get(), "",
[&] { return std::make_unique<KvsBackedTestCache>(store); });
}
};
TEST_F(ReadModifyWriteTest, MultipleCaches) {
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "def"));
auto read_future =
GetCacheEntry(cache1, GetChunkKey(0x0))->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdef")));
}
transaction.CommitAsync().IgnoreFuture();
auto write_req = mock_store->write_requests.pop();
write_req(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(ReadModifyWriteTest, MultiplePhasesMultipleCaches) {
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "def"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "ghi"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "jkl"));
auto read_future =
GetCacheEntry(cache1, GetChunkKey(0x0))->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdefghijkl")));
}
transaction.CommitAsync().IgnoreFuture();
mock_store->write_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->write_requests.pop()(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TENSORSTORE_GLOBAL_INITIALIZER {
using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
for (bool underlying_atomic : {false, true}) {
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = tensorstore::StrCat("Uint64Sharded/underlying_atomic=",
underlying_atomic);
options.get_store = [=] {
return GetShardedKeyValueStore(
tensorstore::GetMemoryKeyValueStore(underlying_atomic),
tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(CachePool::Make(CachePool::Limits{})), {});
};
options.delete_range_supported = false;
options.multi_key_atomic_supported = true;
options.get_key_getter = [] {
return [getter = std::make_shared<GetUint64Key>(true)](
auto key) { return (*getter)(key); };
};
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
}
TEST(ShardedKeyValueStoreTest, SpecRoundtrip) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "memory"}, {"path", "abc/"}};
options.full_spec = {{"driver", "neuroglancer_uint64_sharded"},
{"base", options.full_base_spec},
{"metadata", sharding_spec_json}};
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, SpecRoundtripFile) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "file"}, {"path", tempdir.path() + "/"}};
options.full_spec = {{"driver", "neuroglancer_uint64_sharded"},
{"base", options.full_base_spec},
{"metadata", sharding_spec_json}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, Base) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec,
kvstore::Spec::FromJson({{"driver", "neuroglancer_uint64_sharded"},
{"base", "memory:
{"metadata", sharding_spec_json},
{"path", "1"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
kvstore::Spec::FromJson("memory:
EXPECT_THAT(spec.base(), ::testing::Optional(base_spec));
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open(spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open(base_spec, context).result());
EXPECT_THAT(store.base(), ::testing::Optional(base_store));
auto transaction = tensorstore::Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_with_txn, store | transaction);
EXPECT_THAT(store_with_txn.base(), base_store | transaction);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
84a69cff-dda7-4779-a36a-8b037048acc0 | cpp | google/tensorstore | murmurhash3 | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
constexpr uint32_t MurmurHash3_x86_128Mix(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
constexpr uint32_t RotateLeft(uint32_t x, int r) {
return (x << r) | (x >> (32 - r));
}
}
void MurmurHash3_x86_128Hash64Bits(uint64_t input, uint32_t h[4]) {
uint64_t h1 = h[0], h2 = h[1], h3 = h[2], h4 = h[3];
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t low = static_cast<uint32_t>(input);
const uint32_t high = input >> 32;
uint32_t k2 = high * c2;
k2 = RotateLeft(k2, 16);
k2 *= c3;
h2 ^= k2;
uint32_t k1 = low * c1;
k1 = RotateLeft(k1, 15);
k1 *= c2;
h1 ^= k1;
const uint32_t len = 8;
h1 ^= len;
h2 ^= len;
h3 ^= len;
h4 ^= len;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h1 = MurmurHash3_x86_128Mix(h1);
h2 = MurmurHash3_x86_128Mix(h2);
h3 = MurmurHash3_x86_128Mix(h3);
h4 = MurmurHash3_x86_128Mix(h4);
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h[0] = h1;
h[1] = h2;
h[2] = h3;
h[3] = h4;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::neuroglancer_uint64_sharded::MurmurHash3_x86_128Hash64Bits;
TEST(MurmurHash3Test, Basic) {
uint32_t h[4];
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000e028ae41, 0x000000004772b084,
0x000000004772b084, 0x000000004772b084));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005ad58a7e, 0x0000000054337108,
0x0000000054337108, 0x0000000054337108));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000064010da2, 0x0000000062e8bc17,
0x0000000062e8bc17, 0x0000000062e8bc17));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000016d4ce9a, 0x00000000e8bd67d6,
0x00000000e8bd67d6, 0x00000000e8bd67d6));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000004b7ab8c6, 0x00000000eb555955,
0x00000000eb555955, 0x00000000eb555955));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000eb2301be, 0x0000000048e12494,
0x0000000048e12494, 0x0000000048e12494));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005119f47a, 0x00000000c20b94f9,
0x00000000c20b94f9, 0x00000000c20b94f9));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000d6b51bca, 0x00000000a25ad86b,
0x00000000a25ad86b, 0x00000000a25ad86b));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000002d83d9c7, 0x00000000082115eb,
0x00000000082115eb, 0x00000000082115eb));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8367ff2e-15a0-4c35-9cf2-3c250ad2a825 | cpp | google/tensorstore | coordinator_server | tensorstore/kvstore/ocdbt/distributed/coordinator_server.cc | tensorstore/kvstore/ocdbt/distributed/coordinator_server_test.cc | #include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "grpcpp/security/server_credentials.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/server_callback.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/grpc/peer_address.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security_registry.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
struct LeaseNode;
using LeaseTree = internal::intrusive_red_black_tree::Tree<LeaseNode>;
struct LeaseNode : public LeaseTree::NodeBase {
std::string key;
std::string owner;
absl::Time expiration_time;
uint64_t lease_id;
};
}
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
CoordinatorServer::Spec,
jb::Object(
jb::Member("security",
jb::Projection<&CoordinatorServer::Spec::security>(
internal_ocdbt::RpcSecurityMethodJsonBinder)),
jb::Member("bind_addresses",
jb::Projection<&CoordinatorServer::Spec::bind_addresses>(
jb::DefaultInitializedValue()))));
CoordinatorServer::CoordinatorServer() = default;
CoordinatorServer::~CoordinatorServer() = default;
CoordinatorServer::CoordinatorServer(CoordinatorServer&&) = default;
CoordinatorServer& CoordinatorServer::operator=(CoordinatorServer&&) = default;
class CoordinatorServer::Impl
: public internal_ocdbt::grpc_gen::Coordinator::CallbackService {
public:
std::vector<int> listening_ports_;
std::unique_ptr<grpc::Server> server_;
internal_ocdbt::RpcSecurityMethod::Ptr security_;
Clock clock_;
grpc::ServerUnaryReactor* RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) override;
void PurgeExpiredLeases() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
absl::Mutex mutex_;
LeaseTree leases_by_expiration_time_ ABSL_GUARDED_BY(mutex_);
using LeaseSet =
internal::HeterogeneousHashSet<std::unique_ptr<LeaseNode>,
std::string_view, &LeaseNode::key>;
LeaseSet leases_by_key_ ABSL_GUARDED_BY(mutex_);
};
span<const int> CoordinatorServer::ports() const {
return impl_->listening_ports_;
}
int CoordinatorServer::port() const { return impl_->listening_ports_.front(); }
void CoordinatorServer::Impl::PurgeExpiredLeases() {
auto now = clock_();
for (LeaseTree::iterator it = leases_by_expiration_time_.begin(), next;
it != leases_by_expiration_time_.end() && it->expiration_time < now;
it = next) {
next = std::next(it);
LeaseNode& node = *it;
leases_by_expiration_time_.Remove(node);
leases_by_key_.erase(node.key);
}
}
grpc::ServerUnaryReactor* CoordinatorServer::Impl::RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) {
auto* reactor = context->DefaultReactor();
if (auto status = security_->ValidateServerRequest(context); !status.ok()) {
reactor->Finish(internal::AbslStatusToGrpcStatus(status));
return reactor;
}
auto peer_address = internal::GetGrpcPeerAddressAndPort(context);
if (!peer_address.ok()) {
reactor->Finish(grpc::Status(grpc::StatusCode::INTERNAL,
std::string(peer_address.status().message())));
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: internal error: request=" << *request;
return reactor;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto lease_duration,
internal::ProtoToAbslDuration(request->lease_duration()),
(reactor->Finish(grpc::Status(
grpc::StatusCode::INVALID_ARGUMENT,
tensorstore::StrCat("Invalid lease duration: ", _.message()))),
reactor));
{
absl::MutexLock lock(&mutex_);
PurgeExpiredLeases();
LeaseNode* node;
bool assign_new_lease = false;
bool renew_lease = false;
if (auto it = leases_by_key_.find(request->key());
it != leases_by_key_.end()) {
node = it->get();
if (request->has_renew_lease_id() &&
request->renew_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
renew_lease = true;
} else if (request->has_uncooperative_lease_id() &&
request->uncooperative_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
assign_new_lease = true;
}
} else {
auto new_node = std::make_unique<LeaseNode>();
new_node->key = request->key();
node = new_node.get();
leases_by_key_.insert(std::move(new_node));
assign_new_lease = true;
}
if (assign_new_lease || renew_lease) {
auto cur_time = clock_();
node->expiration_time = cur_time + lease_duration;
if (assign_new_lease) {
node->lease_id = static_cast<uint64_t>(
absl::ToInt64Nanoseconds(cur_time - absl::UnixEpoch()));
node->owner = tensorstore::StrCat(peer_address->first, ":",
request->cooperator_port());
}
response->set_is_owner(true);
leases_by_expiration_time_.FindOrInsert(
[&](LeaseNode& other) {
return node->expiration_time > other.expiration_time
? absl::weak_ordering::greater
: absl::weak_ordering::less;
},
[&] { return node; });
}
response->set_owner(node->owner);
internal::AbslTimeToProto(node->expiration_time,
response->mutable_expiration_time());
response->set_lease_id(node->lease_id);
}
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: request=" << *request << ", response=" << *response;
reactor->Finish(grpc::Status());
return reactor;
}
// Constructs and starts a coordinator gRPC server from `options`.
//
// Defaults: a wall-clock when no clock is injected, the insecure RPC
// security method when none is specified, and a single bind address
// "[::]:0" (all interfaces, OS-assigned port) when none is given.
Result<CoordinatorServer> CoordinatorServer::Start(Options options) {
  auto impl = std::make_unique<Impl>();
  if (options.clock) {
    impl->clock_ = std::move(options.clock);
  } else {
    impl->clock_ = [] { return absl::Now(); };
  }
  impl->security_ = options.spec.security;
  if (!impl->security_) {
    impl->security_ = internal_ocdbt::GetInsecureRpcSecurityMethod();
  }
  grpc::ServerBuilder builder;
  builder.RegisterService(impl.get());
  auto creds = impl->security_->GetServerCredentials();
  if (options.spec.bind_addresses.empty()) {
    options.spec.bind_addresses.push_back("[::]:0");
  }
  // One output slot per bind address; gRPC fills in the actual bound port
  // after BuildAndStart.
  impl->listening_ports_.resize(options.spec.bind_addresses.size());
  for (size_t i = 0; i < options.spec.bind_addresses.size(); ++i) {
    builder.AddListeningPort(options.spec.bind_addresses[i], creds,
                             &impl->listening_ports_[i]);
  }
  impl->server_ = builder.BuildAndStart();
  CoordinatorServer server;
  server.impl_ = std::move(impl);
  return server;
}
}
} | #include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/create_channel.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/ocdbt/distributed/btree_node_identifier.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/lease_cache_for_cooperator.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_ocdbt::BtreeNodeIdentifier;
using ::tensorstore::internal_ocdbt_cooperator::LeaseCacheForCooperator;
using ::tensorstore::ocdbt::CoordinatorServer;
// Test fixture that starts an in-process coordinator server with an
// injected virtual clock and connects a LeaseCacheForCooperator client to
// it over an insecure local channel.
class CoordinatorServerTest : public ::testing::Test {
 protected:
  // Virtual time returned by the coordinator's injected clock.
  absl::Time cur_time;
  CoordinatorServer server_;
  LeaseCacheForCooperator lease_cache;
  void SetUp() override {
    auto security =
        ::tensorstore::internal_ocdbt::GetInsecureRpcSecurityMethod();
    CoordinatorServer::Options options;
    options.spec.security = security;
    // Port 0: let the OS pick a free port; retrieved below via port().
    options.spec.bind_addresses.push_back("localhost:0");
    options.clock = [this] { return cur_time; };
    TENSORSTORE_CHECK_OK_AND_ASSIGN(
        server_, CoordinatorServer::Start(std::move(options)));
    std::string address = tensorstore::StrCat("localhost:", server_.port());
    auto channel =
        ::grpc::CreateChannel(address, security->GetClientCredentials());
    // Best-effort wait for the channel; failure is only logged, since RPCs
    // can still connect lazily afterwards.
    if (!channel->WaitForConnected(
            absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
      ABSL_LOG(WARNING) << "Failed to connect to coordinator after 100ms: "
                        << address;
    }
    LeaseCacheForCooperator::Options lease_cache_options;
    lease_cache_options.clock = {};  // use the lease cache's default clock
    lease_cache_options.cooperator_port = 42;
    lease_cache_options.coordinator_stub =
        tensorstore::internal_ocdbt::grpc_gen::Coordinator::NewStub(
            std::move(channel));
    lease_cache_options.security = security;
    lease_cache = LeaseCacheForCooperator(std::move(lease_cache_options));
  }
};
// Requests a lease for one key and verifies this cooperator is granted
// ownership: no remote peer stub, and the peer address reports the
// cooperator port (42) configured in the fixture.
TEST_F(CoordinatorServerTest, Basic) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto lease_info,
      lease_cache
          .GetLease("key", BtreeNodeIdentifier{1, KeyRange{"abc", "def"}})
          .result());
  EXPECT_FALSE(lease_info->peer_stub);
  EXPECT_THAT(lease_info->peer_address, ::testing::MatchesRegex(".*:42"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/coordinator_server.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/coordinator_server_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d2e0d6b4-ac57-4181-aa0e-9c667da975dc | cpp | google/tensorstore | coalesce_kvstore | tensorstore/kvstore/ocdbt/io/coalesce_kvstore.cc | tensorstore/kvstore/ocdbt/io/coalesce_kvstore_test.cc | #include "tensorstore/kvstore/ocdbt/io/coalesce_kvstore.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
// Returns a copy of `cord` backed by freshly allocated storage.
//
// An already-flat cord is copied from its single flat view; otherwise each
// chunk is appended into one contiguous buffer.
absl::Cord DeepCopyCord(const absl::Cord& cord) {
  if (auto flat_view = cord.TryFlat(); flat_view.has_value()) {
    return absl::Cord(*flat_view);
  }
  internal::FlatCordBuilder builder(cord.size(), false);
  for (absl::string_view chunk : cord.Chunks()) {
    builder.Append(chunk);
  }
  return std::move(builder).Build();
}
// Deep-copies `cord` when its backing allocations are more than 20% larger
// than its logical size; otherwise returns it unchanged.
absl::Cord MaybeDeepCopyCord(absl::Cord cord) {
  const bool wasteful = cord.EstimatedMemoryUsage() > (cord.size() * 1.2);
  if (!wasteful) {
    return cord;
  }
  return DeepCopyCord(cord);
}
// Tracks one key with a read in flight, plus all additional read requests
// for the same key that arrived while it was pending; queued requests are
// merged and issued after the current read completes.
struct PendingRead : public internal::AtomicReferenceCount<PendingRead> {
  kvstore::Key key;
  struct Op {
    // Options (byte range, generation conditions, staleness) of one queued
    // request.
    kvstore::ReadOptions options;
    // Completed with this request's (sliced) result.
    Promise<kvstore::ReadResult> promise;
  };
  std::vector<Op> pending_ops;
};
// Transparent equality for the pending-read set: allows lookup by key
// (std::string_view) without constructing a PendingRead, comparing nodes by
// their `key` member.  Null pointers never compare equal to a key.
struct PendingReadEq {
  using is_transparent = void;
  inline bool operator()(const PendingRead& a, const PendingRead& b) const {
    return a.key == b.key;
  }
  inline bool operator()(std::string_view a, std::string_view b) const {
    return a == b;
  }
  inline bool operator()(const PendingRead& a, std::string_view b) const {
    return a.key == b;
  }
  inline bool operator()(std::string_view a, const PendingRead& b) const {
    return a == b.key;
  }
  inline bool operator()(std::string_view a,
                         const internal::IntrusivePtr<PendingRead>& b) const {
    return b != nullptr && a == b->key;
  }
  inline bool operator()(const internal::IntrusivePtr<PendingRead>& a,
                         std::string_view b) const {
    return a != nullptr && a->key == b;
  }
  inline bool operator()(const internal::IntrusivePtr<PendingRead>& a,
                         const internal::IntrusivePtr<PendingRead>& b) const {
    return a->key == b->key;
  }
};
// Transparent hasher for the pending-read set: a node hashes by its key so
// heterogeneous lookup by std::string_view hashes identically.
struct PendingReadHash {
  using is_transparent = void;
  size_t operator()(std::string_view key) const { return absl::HashOf(key); }
  size_t operator()(const internal::IntrusivePtr<PendingRead>& node) const {
    return absl::HashOf(node->key);
  }
};
// kvstore::Driver adapter that coalesces concurrent reads of the same key.
//
// Reads arriving while a read of the same key is in flight are queued; when
// the in-flight read finishes (or on a fixed `interval`, if nonzero),
// queued requests with compatible options and nearby byte ranges are merged
// into a single read of the base driver.  All other operations forward to
// `base_` unchanged.
class CoalesceKvStoreDriver final : public kvstore::Driver {
 public:
  explicit CoalesceKvStoreDriver(kvstore::DriverPtr base, size_t threshold,
                                 size_t merged_threshold,
                                 absl::Duration interval, Executor executor)
      : base_(std::move(base)),
        threshold_(threshold),
        merged_threshold_(merged_threshold),
        interval_(interval),
        thread_pool_executor_(std::move(executor)) {}

  ~CoalesceKvStoreDriver() override = default;

  Future<ReadResult> Read(Key key, ReadOptions options = {}) override;

  // Everything below is a straight pass-through to the base driver.
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override {
    return base_->Write(std::move(key), std::move(value), std::move(options));
  }
  absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
                               size_t& phase, Key key,
                               ReadModifyWriteSource& source) override {
    return base_->ReadModifyWrite(transaction, phase, std::move(key), source);
  }
  absl::Status TransactionalDeleteRange(
      const internal::OpenTransactionPtr& transaction,
      KeyRange range) override {
    return base_->TransactionalDeleteRange(transaction, std::move(range));
  }
  Future<const void> DeleteRange(KeyRange range) override {
    return base_->DeleteRange(std::move(range));
  }
  void ListImpl(ListOptions options, ListReceiver receiver) override {
    return base_->ListImpl(std::move(options), std::move(receiver));
  }
  std::string DescribeKey(std::string_view key) override {
    return base_->DescribeKey(key);
  }
  Result<kvstore::DriverSpecPtr> GetBoundSpec() const override {
    return base_->GetBoundSpec();
  }
  kvstore::SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return base_->GetSupportedFeatures(key_range);
  }
  void GarbageCollectionVisit(
      garbage_collection::GarbageCollectionVisitor& visitor) const override {
    return base_->GarbageCollectionVisit(visitor);
  }

  // Issues the next merged read(s) for the key tracked by `state_ptr`.
  void StartNextRead(internal::IntrusivePtr<PendingRead> state_ptr);

 private:
  kvstore::DriverPtr base_;
  // Maximum byte gap between requested ranges that may still be merged.
  size_t threshold_;
  // Upper bound on the byte size of one merged read (0 = unlimited).
  size_t merged_threshold_;
  // If nonzero, queued reads are flushed on this period rather than as soon
  // as the previous read completes.
  absl::Duration interval_;
  Executor thread_pool_executor_;

  absl::Mutex mu_;
  // In-flight reads keyed by kvstore key; supports string_view lookup via
  // the transparent hash/eq functors.
  absl::flat_hash_set<internal::IntrusivePtr<PendingRead>, PendingReadHash,
                      PendingReadEq>
      pending_ ABSL_GUARDED_BY(mu_);
};
// Queues the read if another read of `key` is already pending; otherwise
// issues it directly (or schedules a batched flush in interval mode).
Future<kvstore::ReadResult> CoalesceKvStoreDriver::Read(Key key,
                                                        ReadOptions options) {
  internal::IntrusivePtr<PendingRead> state_ptr;
  {
    absl::MutexLock l(&mu_);
    auto it = pending_.find(std::string_view(key));
    if (it != pending_.end()) {
      // A read of this key is already in flight; queue this request to be
      // merged into a later read.
      auto& state = *it;
      auto op = PromiseFuturePair<ReadResult>::Make();
      state->pending_ops.emplace_back(
          PendingRead::Op{std::move(options), std::move(op.promise)});
      return std::move(op.future);
    } else {
      // First read of this key: register a PendingRead entry.
      state_ptr = internal::MakeIntrusivePtr<PendingRead>();
      state_ptr->key = key;
      bool inserted;
      std::tie(it, inserted) = pending_.insert(state_ptr);
      if (interval_ != absl::ZeroDuration()) {
        // Interval-batching mode: queue this request as well and schedule a
        // flush of all queued requests after `interval_`.
        internal::ScheduleAt(
            absl::Now() + interval_,
            [self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
             state = std::move(state_ptr)] {
              auto& executor = self->thread_pool_executor_;
              executor([self = std::move(self), state = std::move(state)] {
                self->StartNextRead(std::move(state));
              });
            });

        auto& state = *it;
        auto op = PromiseFuturePair<ReadResult>::Make();
        state->pending_ops.emplace_back(
            PendingRead::Op{std::move(options), std::move(op.promise)});
        return std::move(op.future);
      }
    }
  }
  // No batching interval: issue the first read immediately; when it
  // completes, start a merged read of whatever queued up meanwhile.
  auto future = base_->Read(key, std::move(options));
  future.ExecuteWhenReady(
      [self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
       state = std::move(state_ptr)](ReadyFuture<ReadResult>) {
        auto& executor = self->thread_pool_executor_;
        executor([self = std::move(self), state = std::move(state)] {
          self->StartNextRead(std::move(state));
        });
      });
  return future;
}
// One merged read of the base driver: the combined options actually issued,
// plus each original request's byte range and promise, to be satisfied by
// slicing the merged result.
struct MergeValue {
  kvstore::ReadOptions options;
  struct Entry {
    // Byte range originally requested by this sub-read.
    OptionalByteRangeRequest byte_range;
    Promise<kvstore::ReadResult> promise;
  };
  std::vector<Entry> subreads;
};
// Completes every queued sub-read from the result of one merged read.
//
// On error, a missing value, or a single sub-read, the merged result is
// forwarded unchanged; otherwise each sub-read's byte range is sliced out
// of the merged value.
void OnReadComplete(MergeValue merge_values,
                    ReadyFuture<kvstore::ReadResult> ready) {
  if (!ready.result().ok() || !ready.value().has_value() ||
      merge_values.subreads.size() == 1) {
    for (const auto& e : merge_values.subreads) {
      e.promise.SetResult(ready.result());
    }
  } else {
    kvstore::ReadResult result = ready.value();
    absl::Cord value = std::move(result.value);

    for (const auto& e : merge_values.subreads) {
      size_t request_start, request_size;
      if (e.byte_range.inclusive_min < 0) {
        // Suffix request: offset is relative to the end of the value.
        request_start = value.size() + e.byte_range.inclusive_min;
      } else {
        // Offset within the merged read's (possibly wider) byte range.
        request_start = e.byte_range.inclusive_min -
                        merge_values.options.byte_range.inclusive_min;
      }
      if (e.byte_range.exclusive_max == -1) {
        // Unbounded request: Subcord clamps, so take through the end.
        request_size = std::numeric_limits<size_t>::max();
      } else {
        request_size = e.byte_range.exclusive_max - e.byte_range.inclusive_min;
      }
      result.value =
          MaybeDeepCopyCord(value.Subcord(request_start, request_size));
      e.promise.SetResult(result);
    }
  }
}
// Flushes the queued reads for `state_ptr->key`.
//
// Takes the queued ops, sorts them so mergeable requests are adjacent,
// groups them by compatible generation conditions and byte-range proximity,
// and issues one base read per group.  In interval mode, also schedules the
// next periodic flush.
void CoalesceKvStoreDriver::StartNextRead(
    internal::IntrusivePtr<PendingRead> state_ptr) {
  std::vector<PendingRead::Op> pending;
  {
    absl::MutexLock l(&mu_);
    if (state_ptr->pending_ops.empty()) {
      // Nothing queued for this key; retire the pending entry.
      pending_.erase(state_ptr->key);
      return;
    } else {
      std::swap(pending, state_ptr->pending_ops);
    }
  }
  if (interval_ != absl::ZeroDuration()) {
    // Interval-batching mode: keep flushing this key every `interval_`.
    internal::ScheduleAt(
        absl::Now() + interval_,
        [self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
         state = state_ptr] {
          auto& executor = self->thread_pool_executor_;
          executor([self = std::move(self), state = std::move(state)] {
            self->StartNextRead(std::move(state));
          });
        });
  }
  // Sort by (generation conditions, byte range) so mergeable requests are
  // adjacent in the scan below.
  std::sort(pending.begin(), pending.end(), [](const auto& a, const auto& b) {
    return std::tie(a.options.generation_conditions.if_equal.value,
                    a.options.generation_conditions.if_not_equal.value,
                    a.options.byte_range.inclusive_min,
                    a.options.byte_range.exclusive_max) <
           std::tie(b.options.generation_conditions.if_equal.value,
                    b.options.generation_conditions.if_not_equal.value,
                    b.options.byte_range.inclusive_min,
                    b.options.byte_range.exclusive_max);
  });
  kvstore::Key key = state_ptr->key;

  MergeValue merged;
  const auto& first_pending = pending.front();
  merged.options = first_pending.options;
  merged.subreads.emplace_back(
      MergeValue::Entry{std::move(first_pending.options.byte_range),
                        std::move(first_pending.promise)});

  for (size_t i = 1; i < pending.size(); ++i) {
    auto& e = pending[i];
    if (e.options.generation_conditions.if_equal !=
            merged.options.generation_conditions.if_equal ||
        e.options.generation_conditions.if_not_equal !=
            merged.options.generation_conditions.if_not_equal ||
        (e.options.byte_range.inclusive_min < 0) !=
            (merged.options.byte_range.inclusive_min < 0)) {
      // Incompatible generation conditions, or a mix of suffix and
      // non-suffix ranges: flush the current group, start a new one.
      assert(!merged.subreads.empty());
      auto f = base_->Read(key, merged.options);
      f.ExecuteWhenReady(
          [merged = std::move(merged)](ReadyFuture<kvstore::ReadResult> ready) {
            OnReadComplete(std::move(merged), std::move(ready));
          });
      merged = MergeValue{};
      merged.options = e.options;
    } else if (merged.options.byte_range.exclusive_max != -1 &&
               ((e.options.byte_range.inclusive_min -
                     merged.options.byte_range.exclusive_max >
                 threshold_) ||
                (merged_threshold_ > 0 &&
                 merged.options.byte_range.size() > merged_threshold_))) {
      // Gap to the next range exceeds `threshold_`, or the merged range
      // already exceeds `merged_threshold_`: flush and start a new group.
      assert(!merged.subreads.empty());
      auto f = base_->Read(key, merged.options);
      f.ExecuteWhenReady(
          [merged = std::move(merged)](ReadyFuture<kvstore::ReadResult> ready) {
            OnReadComplete(std::move(merged), std::move(ready));
          });
      merged = MergeValue{};
      merged.options = e.options;
    } else {
      // Merge into the current group: widen the byte range and take the
      // most lenient staleness bound.
      merged.options.staleness_bound =
          std::max(merged.options.staleness_bound, e.options.staleness_bound);
      merged.options.byte_range.inclusive_min =
          std::min(merged.options.byte_range.inclusive_min,
                   e.options.byte_range.inclusive_min);
      if (merged.options.byte_range.exclusive_max != -1) {
        if (e.options.byte_range.exclusive_max != -1) {
          merged.options.byte_range.exclusive_max =
              std::max(merged.options.byte_range.exclusive_max,
                       e.options.byte_range.exclusive_max);
        } else {
          merged.options.byte_range.exclusive_max = -1;
        }
      }
    }
    merged.subreads.emplace_back(MergeValue::Entry{
        std::move(e.options.byte_range), std::move(e.promise)});
  }

  // Issue the final group; once it completes (and no interval is
  // configured), start the next batch of queued reads for this key.
  assert(!merged.subreads.empty());
  auto f = base_->Read(key, merged.options);
  f.ExecuteWhenReady(
      [self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
       merged = std::move(merged),
       state = std::move(state_ptr)](ReadyFuture<kvstore::ReadResult> ready) {
        auto& executor = self->thread_pool_executor_;
        executor([self = std::move(self), merged = std::move(merged),
                  state = std::move(state), ready = std::move(ready)] {
          OnReadComplete(std::move(merged), std::move(ready));
          if (self->interval_ == absl::ZeroDuration()) {
            self->StartNextRead(std::move(state));
          }
        });
      });
}
}
// Wraps `base` in a driver that coalesces concurrent reads of the same key.
// See CoalesceKvStoreDriver for the meaning of the tuning parameters.
kvstore::DriverPtr MakeCoalesceKvStoreDriver(kvstore::DriverPtr base,
                                             size_t threshold,
                                             size_t merged_threshold,
                                             absl::Duration interval,
                                             Executor executor) {
  ABSL_LOG_IF(INFO, ocdbt_logging)
      << "Coalescing reads with threshold: " << threshold
      << ", merged_threshold: " << merged_threshold
      << ", interval: " << interval;
  auto driver = internal::MakeIntrusivePtr<CoalesceKvStoreDriver>(
      std::move(base), threshold, merged_threshold, interval,
      std::move(executor));
  return driver;
}
}
} | #include "tensorstore/kvstore/ocdbt/io/coalesce_kvstore.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::MakeCoalesceKvStoreDriver;
using ::tensorstore::kvstore::ReadOptions;
TEST(CoalesceKvstoreTest, SimpleRead) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 100, 0,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future = kvstore::Write(coalesce_driver, "a", absl::Cord("a"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
auto read_future = kvstore::Read(coalesce_driver, "a");
read_future.Force();
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ASSERT_TRUE(read_future.result().has_value());
ASSERT_TRUE(read_future.result().value().has_value());
EXPECT_EQ(read_future.result().value().value, absl::Cord("a"));
}
TEST(CoalesceKvstoreTest, ReadWithThreshold) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 0,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4;
ro1.byte_range =
OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range =
OptionalByteRangeRequest(7, 8);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, ro1.byte_range);
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(2, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(7, 8));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("7"));
}
TEST(CoalesceKvstoreTest, ReadWithMergedThreshold) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 2,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4, ro5;
ro1.byte_range =
OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range = OptionalByteRangeRequest(6, 7);
ro5.byte_range = OptionalByteRangeRequest(8, 9);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
auto read_future5 = kvstore::Read(coalesce_driver, "a", ro5);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, ro1.byte_range);
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(2, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(6, 9));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("6"));
TENSORSTORE_EXPECT_OK(read_future5.result());
EXPECT_EQ(read_future5.result().value().value, absl::Cord("8"));
}
TEST(CoalesceKvstoreTest, ReadWithInterval) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 0,
absl::Milliseconds(10),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4;
ro1.byte_range = OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range =
OptionalByteRangeRequest(7, 8);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(0, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(7, 8));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("7"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/coalesce_kvstore.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/coalesce_kvstore_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
750c58e8-c353-449d-8065-8f1bc4edfbe2 | cpp | google/tensorstore | indirect_data_writer | tensorstore/kvstore/ocdbt/io/indirect_data_writer.cc | tensorstore/kvstore/ocdbt/io/indirect_data_writer_test.cc | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <stddef.h>
#include <cassert>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
auto& indirect_data_writer_histogram =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/kvstore/ocdbt/indirect_data_write_size",
internal_metrics::MetricMetadata(
"Histogram of OCDBT buffered write sizes.",
internal_metrics::Units::kBytes));
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
}
// Buffers values destined for a single OCDBT data file and writes them to
// `kvstore_` as one flush, triggered either by forcing the returned future
// or by the buffer reaching `target_size_`.
class IndirectDataWriter
    : public internal::AtomicReferenceCount<IndirectDataWriter> {
 public:
  explicit IndirectDataWriter(kvstore::KvStore kvstore, std::string prefix,
                              size_t target_size)
      : kvstore_(std::move(kvstore)),
        prefix_(std::move(prefix)),
        target_size_(target_size) {}

  kvstore::KvStore kvstore_;
  // Prefix from which new data file ids are generated.
  std::string prefix_;
  // Flush as soon as the buffer reaches this size (0 = only when forced).
  size_t target_size_;

  absl::Mutex mutex_;
  // Number of flush writes currently in progress.
  size_t in_flight_ = 0;
  // Set when a force has requested a flush of the current buffer.
  bool flush_requested_ = false;
  // Data accumulated for the current data file, not yet written.
  absl::Cord buffer_;
  // Resolved when the current buffer has been written.
  Promise<void> promise_;
  // Id of the data file the current buffer will be written to.
  DataFileId data_file_id_;
};
// Forwards the ref-count increment to the AtomicReferenceCount base.
void intrusive_ptr_increment(IndirectDataWriter* p) {
  auto* counted =
      static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p);
  intrusive_ptr_increment(counted);
}
// Forwards the ref-count decrement to the AtomicReferenceCount base.
void intrusive_ptr_decrement(IndirectDataWriter* p) {
  auto* counted =
      static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p);
  intrusive_ptr_decrement(counted);
}
namespace {
// Starts a flush of the buffered data if warranted, consuming `lock`.
//
// A flush starts when the buffer has reached `target_size_`, or when a
// flush was requested (via force) and no other flush is in flight.  When
// the write completes, the detached promise is resolved and another flush
// is attempted for any data buffered in the meantime.
void MaybeFlush(IndirectDataWriter& self, UniqueWriterLock<absl::Mutex> lock) {
  bool buffer_at_target =
      self.target_size_ > 0 && self.buffer_.size() >= self.target_size_;
  ABSL_LOG_IF(INFO, ocdbt_logging)
      << "MaybeFlush: flush_requested=" << self.flush_requested_
      << ", in_flight=" << self.in_flight_
      << ", buffer_at_target=" << buffer_at_target;
  if (buffer_at_target) {
    // Size-triggered flush proceeds regardless of in-flight writes.
  } else if (!self.flush_requested_ || self.in_flight_ > 0) {
    return;
  }

  self.in_flight_++;
  // Detach the current buffer/promise/file id under the lock; subsequent
  // Writes accumulate into a fresh buffer with a new file id.
  self.flush_requested_ = false;
  Promise<void> promise = std::exchange(self.promise_, {});
  absl::Cord buffer = std::exchange(self.buffer_, {});
  DataFileId data_file_id = self.data_file_id_;
  lock.unlock();

  indirect_data_writer_histogram.Observe(buffer.size());
  ABSL_LOG_IF(INFO, ocdbt_logging)
      << "Flushing " << buffer.size() << " bytes to " << data_file_id;

  auto write_future =
      kvstore::Write(self.kvstore_, data_file_id.FullPath(), std::move(buffer));
  write_future.Force();
  write_future.ExecuteWhenReady(
      [promise = std::move(promise), data_file_id = std::move(data_file_id),
       self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
          ReadyFuture<TimestampedStorageGeneration> future) {
        auto& r = future.result();
        ABSL_LOG_IF(INFO, ocdbt_logging)
            << "Done flushing data to " << data_file_id << ": " << r.status();
        if (!r.ok()) {
          promise.SetResult(r.status());
        } else if (StorageGeneration::IsUnknown(r->generation)) {
          // NOTE(review): an unknown generation is treated as a file-id
          // collision -- confirm against the kvstore Write contract.
          promise.SetResult(absl::UnavailableError("Non-unique file id"));
        } else {
          promise.SetResult(absl::OkStatus());
        }
        // This flush is done; start another if one is pending.
        UniqueWriterLock lock{self->mutex_};
        assert(self->in_flight_ > 0);
        self->in_flight_--;
        MaybeFlush(*self, std::move(lock));
      });
}
}
// Appends `data` to the writer's buffer and records its location (file id,
// offset, length) in `ref`.
//
// Returns a future that resolves once the containing data file has been
// written; forcing the future requests a flush.  Empty data resolves
// immediately with a null reference.
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
                         IndirectDataReference& ref) {
  ABSL_LOG_IF(INFO, ocdbt_logging)
      << "Write indirect data: size=" << data.size();
  if (data.empty()) {
    ref.file_id = DataFileId{};
    ref.offset = 0;
    ref.length = 0;
    return absl::OkStatus();
  }
  UniqueWriterLock lock{self.mutex_};
  Future<const void> future;
  if (self.promise_.null() || (future = self.promise_.future()).null()) {
    // No flush currently pending (or its future is no longer obtainable):
    // start a new data file and a new promise/future pair.
    self.data_file_id_ = GenerateDataFileId(self.prefix_);
    auto p = PromiseFuturePair<void>::Make();
    self.promise_ = std::move(p.promise);
    future = std::move(p.future);
    self.promise_.ExecuteWhenForced(
        [self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
            Promise<void> promise) {
          ABSL_LOG_IF(INFO, ocdbt_logging) << "Force called";
          UniqueWriterLock lock{self->mutex_};
          // Ignore if a newer buffer/promise has superseded this one.
          if (!HaveSameSharedState(promise, self->promise_)) return;
          self->flush_requested_ = true;
          MaybeFlush(*self, std::move(lock));
        });
  }
  ref.file_id = self.data_file_id_;
  ref.offset = self.buffer_.size();
  ref.length = data.size();
  self.buffer_.Append(std::move(data));
  // Size-triggered flush, independent of any force.
  if (self.target_size_ > 0 && self.buffer_.size() >= self.target_size_) {
    MaybeFlush(self, std::move(lock));
  }
  return future;
}
// Creates a writer that batches indirect-data writes to `kvstore` under
// `prefix`, flushing once the buffer reaches `target_size` (0 = only when
// forced).
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
                                             std::string prefix,
                                             size_t target_size) {
  IndirectDataWriterPtr writer =
      internal::MakeIntrusivePtr<IndirectDataWriter>(
          std::move(kvstore), std::move(prefix), target_size);
  return writer;
}
}
} | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Future;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::MakeIndirectDataWriter;
using ::tensorstore::internal_ocdbt::Write;
namespace {
// Returns a flat cord of `size` bytes, each set to 0x37.
absl::Cord GetCord(size_t size) {
  FlatCordBuilder cord_builder(size);
  memset(cord_builder.data(), 0x37, cord_builder.size());
  return std::move(cord_builder).Build();
}
// Moves the key names out of a container of list entries and returns them
// sorted.
template <typename T>
std::vector<std::string> ListEntriesToFiles(T& entries) {
  std::vector<std::string> files;
  files.reserve(entries.size());
  for (auto& entry : entries) {
    files.emplace_back(std::move(entry.key));
  }
  std::sort(files.begin(), files.end());
  return files;
}
// With target_size == 0 the writer flushes only when forced, so the 1000
// buffered writes coalesce into very few data files (the test observes 2).
TEST(IndirectDataWriter, UnlimitedSize) {
  auto data = GetCord(260);
  auto memory_store = tensorstore::GetMemoryKeyValueStore();
  auto mock_key_value_store = MockKeyValueStore::Make();
  auto writer = MakeIndirectDataWriter(
      tensorstore::kvstore::KvStore(mock_key_value_store), "d/", 0);
  std::vector<Future<const void>> futures;
  std::vector<std::string> refs;
  for (int i = 0; i < 1000; ++i) {
    IndirectDataReference ref;
    auto f = Write(*writer, data, ref);
    // Track each distinct data file id that writes are assigned to.
    if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
      refs.push_back(ref.file_id.FullPath());
    }
    f.Force();
    futures.push_back(std::move(f));
  }
  std::sort(refs.begin(), refs.end());
  EXPECT_THAT(refs, ::testing::SizeIs(::testing::Eq(2)));
  // Flushes are serialized: at most one write request is outstanding at a
  // time.  Serve each from the memory store.
  while (!mock_key_value_store->write_requests.empty()) {
    EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Eq(1));
    auto r = mock_key_value_store->write_requests.pop();
    r(memory_store);
  }
  for (auto& f : futures) {
    TENSORSTORE_ASSERT_OK(f.status());
  }
  // The store ends up with exactly the data files that were referenced.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto entries,
      tensorstore::kvstore::ListFuture(memory_store.get()).result());
  auto files = ListEntriesToFiles(entries);
  EXPECT_THAT(files, ::testing::SizeIs(2));
  EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
// Writes 1000 identical 260-byte values with a 1 KiB per-file target size and
// verifies that writes are spread across many data files, each offset staying
// within the target size.
TEST(IndirectDataWriter, LimitedSize) {
  constexpr size_t kTargetSize = 1024;
  auto data = GetCord(260);
  auto memory_store = tensorstore::GetMemoryKeyValueStore();
  auto mock_key_value_store = MockKeyValueStore::Make();
  auto writer = MakeIndirectDataWriter(
      tensorstore::kvstore::KvStore(mock_key_value_store), "d/", kTargetSize);
  std::vector<Future<const void>> futures;
  std::vector<std::string> refs;
  for (int i = 0; i < 1000; ++i) {
    IndirectDataReference ref;
    auto f = Write(*writer, data, ref);
    // No value may start beyond the configured target size of its file.
    EXPECT_THAT(ref.offset, testing::Le(kTargetSize));
    if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
      refs.push_back(ref.file_id.FullPath());
    }
    f.Force();
    futures.push_back(std::move(f));
  }
  std::sort(refs.begin(), refs.end());
  // With ~260-byte values and a 1 KiB limit, many distinct files are needed.
  EXPECT_THAT(refs, ::testing::SizeIs(::testing::Ge(250)));
  EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Gt(1));
  while (!mock_key_value_store->write_requests.empty()) {
    auto r = mock_key_value_store->write_requests.pop();
    r(memory_store);
  }
  for (auto& f : futures) {
    TENSORSTORE_ASSERT_OK(f.status());
  }
  // The files in the backing store must match the referenced files exactly.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto entries,
      tensorstore::kvstore::ListFuture(memory_store.get()).result());
  auto files = ListEntriesToFiles(entries);
  EXPECT_THAT(files, ::testing::SizeIs(refs.size()));
  EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/indirect_data_writer.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/indirect_data_writer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f41746de-0a2d-465c-afe8-d44b6c3c4991 | cpp | google/tensorstore | read_version | tensorstore/kvstore/ocdbt/non_distributed/read_version.cc | tensorstore/kvstore/ocdbt/read_version_test.cc | #include "tensorstore/kvstore/ocdbt/non_distributed/read_version.h"
#include <cassert>
#include <memory>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
// Asynchronous state machine that resolves a `VersionSpec` to a
// `BtreeGenerationReference`, first consulting the manifest and then, for
// versions older than those inlined in the manifest, descending the version
// tree.  Reference-counted so the continuation callbacks can share ownership
// of the state.
struct ReadVersionOperation
    : public internal::AtomicReferenceCount<ReadVersionOperation> {
  using Ptr = internal::IntrusivePtr<ReadVersionOperation>;
  using PromiseType = Promise<BtreeGenerationReference>;
  // Handle used to fetch the manifest and version tree nodes.
  ReadonlyIoHandle::Ptr io_handle;
  // The version being resolved.
  VersionSpec version_spec;
  // Manifests observed before this time are considered stale and re-read.
  absl::Time staleness_bound;
  // Entry point: allocates the operation state and issues the initial
  // manifest request.  The initial request uses an unconstrained staleness
  // bound (`absl::InfinitePast()`); `staleness_bound` is only applied if the
  // first manifest does not contain the requested version.
  static Future<BtreeGenerationReference> Start(ReadonlyIoHandle::Ptr io_handle,
                                                VersionSpec version_spec,
                                                absl::Time staleness_bound) {
    auto op = internal::MakeIntrusivePtr<ReadVersionOperation>();
    op->io_handle = std::move(io_handle);
    op->version_spec = version_spec;
    op->staleness_bound = staleness_bound;
    auto [promise, future] =
        PromiseFuturePair<BtreeGenerationReference>::Make();
    RequestManifest(std::move(op), std::move(promise), absl::InfinitePast());
    return std::move(future);
  }
  // Requests a manifest no older than `staleness_bound` and continues in
  // `ManifestReady` on the I/O executor.
  static void RequestManifest(ReadVersionOperation::Ptr op, PromiseType promise,
                              absl::Time staleness_bound) {
    auto* op_ptr = op.get();
    LinkValue(
        WithExecutor(op_ptr->io_handle->executor,
                     [op = std::move(op)](
                         PromiseType promise,
                         ReadyFuture<const ManifestWithTime> future) mutable {
                       ManifestReady(std::move(op), std::move(promise),
                                     future.value());
                     }),
        std::move(promise), op_ptr->io_handle->GetManifest(staleness_bound));
  }
  // Called once the manifest has been retrieved.  Answers directly from the
  // versions inlined in the manifest, starts a version-tree traversal for
  // older versions, retries with a fresher manifest, or fails.
  static void ManifestReady(ReadVersionOperation::Ptr op, PromiseType promise,
                            const ManifestWithTime& manifest_with_time) {
    if (!manifest_with_time.manifest ||
        CompareVersionSpecToVersion(
            op->version_spec, manifest_with_time.manifest->latest_version()) >
            0) {
      // The requested version is newer than anything in this manifest (or no
      // manifest exists).  If the manifest snapshot predates the staleness
      // bound, re-read the manifest before giving up.
      if (manifest_with_time.time < op->staleness_bound) {
        auto staleness_bound = op->staleness_bound;
        RequestManifest(std::move(op), std::move(promise), staleness_bound);
        return;
      }
      // Without a manifest, or for an exact version spec newer than the
      // latest version, the version cannot be present.
      if (!manifest_with_time.manifest ||
          IsVersionSpecExact(op->version_spec)) {
        op->VersionNotPresent(promise);
        return;
      }
    }
    const auto& manifest = *manifest_with_time.manifest;
    if (CompareVersionSpecToVersion(op->version_spec,
                                    manifest.versions.front()) >= 0) {
      // The version, if present, is among the versions inlined directly in
      // the manifest.
      if (auto* ref = internal_ocdbt::FindVersion(manifest.versions,
                                                  op->version_spec)) {
        promise.SetResult(*ref);
        return;
      }
      op->VersionNotPresent(promise);
      return;
    }
    // Older version: locate the version tree node covering the spec.
    auto* ref = internal_ocdbt::FindVersion(
        manifest.config.version_tree_arity_log2, manifest.version_tree_nodes,
        op->version_spec);
    if (!ref) {
      op->VersionNotPresent(promise);
      return;
    }
    LookupNodeReference(std::move(op), std::move(promise), *ref);
  }
  // Completes the operation with a NOT_FOUND error for `version_spec`.
  void VersionNotPresent(const PromiseType& promise) {
    promise.SetResult(absl::NotFoundError(absl::StrFormat(
        "Version where %s not present", FormatVersionSpec(version_spec))));
  }
  // Fetches the version tree node referenced by `node_ref` and continues in
  // `NodeReadyCallback` on the I/O executor.
  static void LookupNodeReference(ReadVersionOperation::Ptr op,
                                  PromiseType promise,
                                  const VersionNodeReference& node_ref) {
    ABSL_LOG_IF(INFO, ocdbt_logging)
        << "ReadVersion: " << FormatVersionSpec(op->version_spec)
        << ", node_ref=" << node_ref;
    auto read_future = op->io_handle->GetVersionTreeNode(node_ref.location);
    auto executor = op->io_handle->executor;
    LinkValue(WithExecutor(std::move(executor),
                           NodeReadyCallback{std::move(op), node_ref}),
              std::move(promise), std::move(read_future));
  }
  // Continuation invoked once a version tree node has been read: validates
  // the node against its reference, then recurses into interior nodes or
  // finishes at a leaf.
  struct NodeReadyCallback {
    ReadVersionOperation::Ptr op;
    VersionNodeReference node_ref;
    void operator()(
        PromiseType promise,
        ReadyFuture<const std::shared_ptr<const VersionTreeNode>> read_future) {
      auto node = read_future.value();
      auto* config = op->io_handle->config_state->GetExistingConfig();
      assert(config);
      // On validation failure, propagate the error into the promise.
      TENSORSTORE_RETURN_IF_ERROR(
          ValidateVersionTreeNodeReference(
              *node, *config, node_ref.generation_number, node_ref.height),
          static_cast<void>(promise.SetResult(_)));
      if (node->height > 0) {
        VisitInteriorNode(std::move(op), *node, std::move(promise));
      } else {
        VisitLeafNode(std::move(op), *node, std::move(promise));
      }
    }
  };
  // Interior node: descend into the child entry covering `version_spec`, or
  // fail if no child covers it.
  static void VisitInteriorNode(ReadVersionOperation::Ptr op,
                                const VersionTreeNode& node,
                                PromiseType promise) {
    auto& entries =
        std::get<VersionTreeNode::InteriorNodeEntries>(node.entries);
    auto* config = op->io_handle->config_state->GetExistingConfig();
    assert(config);
    auto* node_ref = internal_ocdbt::FindVersion(
        config->version_tree_arity_log2, entries, op->version_spec);
    if (!node_ref) {
      op->VersionNotPresent(std::move(promise));
      return;
    }
    LookupNodeReference(std::move(op), std::move(promise), *node_ref);
  }
  // Leaf node: resolve `version_spec` to a concrete generation reference, or
  // fail with NOT_FOUND.
  static void VisitLeafNode(ReadVersionOperation::Ptr op,
                            const VersionTreeNode& node, PromiseType promise) {
    auto& entries = std::get<VersionTreeNode::LeafNodeEntries>(node.entries);
    auto* ref = internal_ocdbt::FindVersion(entries, op->version_spec);
    if (!ref) {
      op->VersionNotPresent(std::move(promise));
      return;
    }
    promise.SetResult(*ref);
  }
};
}
// Resolves `version_spec` to a B+tree generation reference, reading manifests
// no older than `staleness_bound`.
//
// Generation number 0 is rejected up front (generation numbers are 1-based);
// all other resolution and validation happens asynchronously in
// `ReadVersionOperation`.
Future<BtreeGenerationReference> ReadVersion(ReadonlyIoHandle::Ptr io_handle,
                                             VersionSpec version_spec,
                                             absl::Time staleness_bound) {
  if (const GenerationNumber* generation_number =
          std::get_if<GenerationNumber>(&version_spec)) {
    if (*generation_number == 0) {
      return absl::InvalidArgumentError("Generation number must be positive");
    }
  }
  // `absl::Time` is a trivially-copyable value type, so it is passed by
  // value; the previous `std::move` was a no-op (performance-move-const-arg).
  return ReadVersionOperation::Start(std::move(io_handle), version_spec,
                                     staleness_bound);
}
}
} | #include "tensorstore/kvstore/ocdbt/non_distributed/read_version.h"
#include <stddef.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/driver.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/create_new_manifest.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/list_versions.h"
#include "tensorstore/kvstore/ocdbt/test_util.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal_ocdbt::BtreeGenerationReference;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::CommitTimeUpperBound;
using ::tensorstore::internal_ocdbt::EnsureExistingManifest;
using ::tensorstore::internal_ocdbt::GenerationNumber;
using ::tensorstore::internal_ocdbt::GetOcdbtIoHandle;
using ::tensorstore::internal_ocdbt::ListVersionsFuture;
using ::tensorstore::internal_ocdbt::ListVersionsOptions;
using ::tensorstore::internal_ocdbt::OcdbtDriver;
using ::tensorstore::internal_ocdbt::ReadManifest;
using ::tensorstore::internal_ocdbt::ReadVersion;
// Exercises version reading/listing against an OCDBT store over an in-memory
// base kvstore:
//   1. Opens a store with `config_json` and performs `num_writes` writes,
//      recording the manifest generation after each commit.
//   2. Verifies ListVersions returns exactly the recorded generations.
//   3. Verifies ReadVersion resolves every generation by number, exact commit
//      time, and commit-time upper bound.
//   4. Verifies out-of-range lookups fail (kInvalidArgument for generation 0,
//      kNotFound otherwise).
//   5. Sweeps min/max generation-number and commit-time filters of
//      ListVersions across every boundary value.
//
// Fixes relative to the previous revision:
//  - restored the truncated "memory://" base-spec literal;
//  - the filter-sweep loop compared a signed `version_i` (starting at -1)
//    against the unsigned `generations.size()`, so -1 converted to a huge
//    unsigned value and the loop body never executed; explicit casts to
//    `ptrdiff_t` make the loop run as intended;
//  - the in-range branch used `generations[0].commit_time` instead of
//    `generations[version_i].commit_time`, which would make the commit-time
//    filters disagree with the varying `expected_generations`.
void TestVersioning(::nlohmann::json config_json, size_t num_writes) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto ocdbt_store,
      kvstore::Open({{"driver", "ocdbt"},
                     {"config", config_json},
                     {"base", "memory://"}})
          .result());
  auto io_handle = GetOcdbtIoHandle(*ocdbt_store.driver);
  std::vector<BtreeGenerationReference> generations;
  TENSORSTORE_ASSERT_OK(EnsureExistingManifest(io_handle));
  {
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto manifest,
        ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
    ASSERT_TRUE(manifest);
    ASSERT_EQ(1, manifest->latest_generation());
    generations.push_back(manifest->latest_version());
  }
  for (size_t i = 0; i < num_writes; ++i) {
    // Ensure each commit receives a distinct commit time.
    UniqueNow(absl::Nanoseconds(2));
    TENSORSTORE_ASSERT_OK(
        kvstore::Write(ocdbt_store, "a", absl::Cord(tensorstore::StrCat(i))));
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto manifest,
        ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
    ASSERT_TRUE(manifest);
    ASSERT_EQ(i + 2, manifest->latest_generation());
    generations.push_back(manifest->latest_version());
  }
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto manifest,
      ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
  ASSERT_TRUE(manifest);
  SCOPED_TRACE(tensorstore::StrCat(*manifest));
  {
    // An unfiltered listing must return every recorded generation in order.
    ListVersionsOptions list_versions_options;
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto final_generations,
        ListVersionsFuture(io_handle, list_versions_options).result());
    EXPECT_EQ(generations, final_generations);
  }
  for (size_t version_i = 0; version_i < generations.size(); ++version_i) {
    const auto& version = generations[version_i];
    // Each version must be resolvable by generation number, exact commit
    // time, and commit-time upper bound.
    EXPECT_THAT(ReadVersion(io_handle, version.generation_number).result(),
                ::testing::Optional(version));
    EXPECT_THAT(ReadVersion(io_handle, version.commit_time).result(),
                ::testing::Optional(version));
    EXPECT_THAT(
        ReadVersion(io_handle, CommitTimeUpperBound{version.commit_time})
            .result(),
        ::testing::Optional(version));
    {
      // An upper bound strictly between this commit and the next still
      // resolves to this version.
      CommitTime newer_commit_time = version.commit_time;
      newer_commit_time.value++;
      EXPECT_THAT(
          ReadVersion(io_handle, CommitTimeUpperBound{newer_commit_time})
              .result(),
          ::testing::Optional(version));
    }
  }
  EXPECT_THAT(ReadVersion(io_handle, GenerationNumber(0)).result(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(
      ReadVersion(io_handle, generations.back().generation_number + 1).result(),
      MatchesStatus(absl::StatusCode::kNotFound));
  {
    CommitTime newer_commit_time = generations.back().commit_time;
    newer_commit_time.value++;
    EXPECT_THAT(ReadVersion(io_handle, newer_commit_time).result(),
                MatchesStatus(absl::StatusCode::kNotFound));
  }
  {
    CommitTime older_commit_time = generations.front().commit_time;
    older_commit_time.value--;
    EXPECT_THAT(ReadVersion(io_handle, older_commit_time).result(),
                MatchesStatus(absl::StatusCode::kNotFound));
    EXPECT_THAT(ReadVersion(io_handle, CommitTimeUpperBound{older_commit_time})
                    .result(),
                MatchesStatus(absl::StatusCode::kNotFound));
  }
  // Sweep every boundary: one before the first version (-1), each version,
  // and one past the last (== size()).
  for (ptrdiff_t version_i = -1;
       version_i <= static_cast<ptrdiff_t>(generations.size()); ++version_i) {
    SCOPED_TRACE(absl::StrFormat("version_i=%d", version_i));
    GenerationNumber generation_number =
        static_cast<GenerationNumber>(version_i + 1);
    CommitTime intermediate_commit_time, exact_commit_time;
    if (version_i == -1) {
      exact_commit_time = generations[0].commit_time;
      --exact_commit_time.value;
      intermediate_commit_time = exact_commit_time;
    } else if (version_i < static_cast<ptrdiff_t>(generations.size())) {
      exact_commit_time = generations[version_i].commit_time;
      intermediate_commit_time = exact_commit_time;
      intermediate_commit_time.value--;
    } else {
      exact_commit_time = generations.back().commit_time;
      exact_commit_time.value++;
      intermediate_commit_time = exact_commit_time;
    }
    {
      // Lower-bound filters: expect the suffix starting at `version_i`.
      auto expected_generations =
          span(generations).subspan(std::max(ptrdiff_t(0), version_i));
      {
        ListVersionsOptions list_versions_options;
        list_versions_options.min_generation_number = generation_number;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
      {
        ListVersionsOptions list_versions_options;
        list_versions_options.min_commit_time = exact_commit_time;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
      {
        ListVersionsOptions list_versions_options;
        list_versions_options.min_commit_time = intermediate_commit_time;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
    }
    {
      // Upper-bound filters: expect the prefix ending at `version_i`.
      auto expected_generations =
          span(generations)
              .subspan(0,
                       std::min(ptrdiff_t(generations.size()), version_i + 1));
      {
        ListVersionsOptions list_versions_options;
        list_versions_options.max_generation_number = generation_number;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
      {
        ListVersionsOptions list_versions_options;
        list_versions_options.max_commit_time = exact_commit_time;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
      {
        // A max commit time just below version_i's excludes version_i itself.
        auto expected_generations =
            span(generations).subspan(0, std::max(ptrdiff_t(0), version_i));
        ListVersionsOptions list_versions_options;
        list_versions_options.max_commit_time = intermediate_commit_time;
        EXPECT_THAT(
            ListVersionsFuture(io_handle, list_versions_options).result(),
            ::testing::Optional(
                ::testing::ElementsAreArray(expected_generations)));
      }
    }
  }
}
// Exercises versioning with the smallest version-tree arity (2^1 = 2).
TEST(ReadVersionTest, VersionTreeArityLog2_1) {
  TestVersioning({{"version_tree_arity_log2", 1}}, 10);
}
// Exercises versioning with a version-tree arity of 2^2 = 4.
TEST(ReadVersionTest, VersionTreeArityLog2_2) {
  TestVersioning({{"version_tree_arity_log2", 2}}, 10);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/non_distributed/read_version.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/read_version_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
189ba346-65b2-4a18-9e26-4f064de7f3b7 | cpp | google/tensorstore | dump | tensorstore/kvstore/ocdbt/format/dump.cc | tensorstore/kvstore/ocdbt/format/dump_test.cc | #include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <map>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include <nlohmann/json.hpp>
#include "re2/re2.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_variant.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
// Parses a labeled indirect data reference of the form
// "<kind>:<base_path>:<relative_path>:<offset>:<length>", where the path
// components are percent-encoded.  Returns kInvalidArgument if the string
// does not match this shape or the kind label is unknown; the final
// `Validate(false)` rejects out-of-range offset/length combinations.
Result<LabeledIndirectDataReference> LabeledIndirectDataReference::Parse(
    std::string_view s) {
  LabeledIndirectDataReference r;
  // Five colon-separated fields; offset and length bind directly to the
  // numeric members via RE2 argument conversion.
  static LazyRE2 kPattern = {"([^:]+):([^:]*):([^:]*):([0-9]+):([0-9]+)"};
  std::string_view label, encoded_base_path, encoded_relative_path;
  if (!RE2::FullMatch(s, *kPattern, &label, &encoded_base_path,
                      &encoded_relative_path, &r.location.offset,
                      &r.location.length)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid indirect data reference: ", tensorstore::QuoteString(s)));
  }
  TENSORSTORE_ASSIGN_OR_RETURN(r.kind, ParseIndirectDataKind(label));
  r.location.file_id.base_path = internal::PercentDecode(encoded_base_path);
  r.location.file_id.relative_path =
      internal::PercentDecode(encoded_relative_path);
  TENSORSTORE_RETURN_IF_ERROR(r.location.Validate(false));
  return r;
}
namespace {
namespace jb = tensorstore::internal_json_binding;
constexpr auto ConfigBinder = jb::Compose<ConfigConstraints>(
[](auto is_loading, const auto& options, auto* obj, auto* constraints) {
if constexpr (is_loading) {
CreateConfig(constraints, *obj);
if (ConfigConstraints(*obj) != *constraints) {
return absl::InvalidArgumentError("Config is not fully specified");
}
} else {
*constraints = ConfigConstraints(*obj);
}
return absl::OkStatus();
});
static inline constexpr internal::AsciiSet
kLabeledIndirectDataReferenceUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_./"};
constexpr auto LabeledIndirectDataReferenceBinder =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
if (auto* s = j->template get_ptr<const std::string*>()) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj,
LabeledIndirectDataReference::Parse(*s));
} else {
return internal_json::ExpectedError(*j, "string");
}
} else {
if (obj->location.IsMissing()) {
*j = ::nlohmann::json::value_t::discarded;
} else {
*j = tensorstore::StrCat(
IndirectDataKindToString(obj->kind), ":",
internal::PercentEncodeReserved(
obj->location.file_id.base_path,
kLabeledIndirectDataReferenceUnreservedChars),
":",
internal::PercentEncodeReserved(
obj->location.file_id.relative_path,
kLabeledIndirectDataReferenceUnreservedChars),
":", obj->location.offset, ":", obj->location.length);
}
}
return absl::OkStatus();
};
constexpr auto IndirectDataReferenceBinder(IndirectDataKind kind) {
return jb::Compose<LabeledIndirectDataReference>(
[kind](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
*obj = j->location;
} else {
j->location = *obj;
j->kind = kind;
}
return absl::OkStatus();
},
LabeledIndirectDataReferenceBinder);
}
constexpr auto CommitTimeBinder = jb::Projection<&CommitTime::value>();
constexpr auto BtreeNodeStatisticsBinder = jb::Object(
jb::Member(
"num_indirect_value_bytes",
jb::Projection<&BtreeNodeStatistics::num_indirect_value_bytes>()),
jb::Member("num_tree_bytes",
jb::Projection<&BtreeNodeStatistics::num_tree_bytes>()),
jb::Member("num_keys", jb::Projection<&BtreeNodeStatistics::num_keys>()));
constexpr auto BtreeNodeReferenceBinder = jb::Object(
jb::Member("location",
jb::Projection<&BtreeNodeReference::location>(
IndirectDataReferenceBinder(IndirectDataKind::kBtreeNode))),
jb::Member("statistics", jb::Projection<&BtreeNodeReference::statistics>(
BtreeNodeStatisticsBinder)));
constexpr auto BtreeGenerationReferenceBinder = jb::Object(
jb::Member("root", jb::Projection<&BtreeGenerationReference::root>(
BtreeNodeReferenceBinder)),
jb::Member("generation_number",
jb::Projection<&BtreeGenerationReference::generation_number>()),
jb::Member("root_height",
jb::Projection<&BtreeGenerationReference::root_height>()),
jb::Member("commit_time",
jb::Projection<&BtreeGenerationReference::commit_time>(
CommitTimeBinder)));
constexpr auto VersionNodeReferenceBinder = jb::Object(
jb::Member("location", jb::Projection<&VersionNodeReference::location>(
IndirectDataReferenceBinder(
IndirectDataKind::kVersionNode))),
jb::Member("generation_number",
jb::Projection<&VersionNodeReference::generation_number>()),
jb::Member("height", jb::Projection<&VersionNodeReference::height>()),
jb::Member("num_generations",
jb::Projection<&VersionNodeReference::num_generations>()),
jb::Member(
"commit_time",
jb::Projection<&VersionNodeReference::commit_time>(CommitTimeBinder)));
constexpr auto ManifestBinder = jb::Object(
jb::Member("config", jb::Projection<&Manifest::config>(ConfigBinder)),
jb::Member("versions", jb::Projection<&Manifest::versions>(
jb::Array(BtreeGenerationReferenceBinder))),
jb::Member("version_tree_nodes",
jb::Projection<&Manifest::version_tree_nodes>(
jb::Array(VersionNodeReferenceBinder))));
constexpr auto BinaryCordBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) {
if constexpr (is_loading) {
if (auto* b = j->template get_ptr<const ::nlohmann::json::binary_t*>()) {
*obj = absl::Cord(std::string_view(
reinterpret_cast<const char*>(b->data()), b->size()));
return absl::OkStatus();
} else if (auto* s = j->template get_ptr<const std::string*>()) {
*obj = absl::Cord(*s);
return absl::OkStatus();
} else {
return internal_json::ExpectedError(*j, "string or byte string");
}
} else {
::nlohmann::json::binary_t v;
v.reserve(obj->size());
for (std::string_view chunk : obj->Chunks()) {
v.insert(v.end(), chunk.begin(), chunk.end());
}
*j = std::move(v);
return absl::OkStatus();
}
};
constexpr auto LeafNodeValueReferenceBinder = jb::Variant(
jb::Member("inline_value", BinaryCordBinder),
jb::Member("indirect_value",
IndirectDataReferenceBinder(IndirectDataKind::kValue)));
constexpr auto BtreeLeafNodeEntryBinder(std::string_view key_prefix) {
return
[=](std::false_type is_loading, const auto& options, auto* obj, auto* j) {
::nlohmann::json::binary_t key;
key.insert(key.end(), key_prefix.begin(), key_prefix.end());
key.insert(key.end(), obj->key.begin(), obj->key.end());
::nlohmann::json::object_t x{{"key", key}};
TENSORSTORE_RETURN_IF_ERROR(LeafNodeValueReferenceBinder(
std::false_type{}, IncludeDefaults{}, &obj->value_reference, &x));
*j = std::move(x);
return absl::OkStatus();
};
}
constexpr auto BtreeInteriorNodeEntryBinder(std::string_view key_prefix) {
return [=](std::false_type is_loading, const auto& options, auto* obj,
auto* j) {
::nlohmann::json::binary_t key;
key.insert(key.end(), key_prefix.begin(), key_prefix.end());
key.insert(key.end(), obj->key.begin(), obj->key.end());
auto common_prefix = key;
common_prefix.resize(obj->subtree_common_prefix_length + key_prefix.size());
::nlohmann::json::object_t x;
TENSORSTORE_RETURN_IF_ERROR(BtreeNodeReferenceBinder(
std::false_type{}, IncludeDefaults{}, &obj->node, &x));
x["key"] = key;
x["subtree_common_prefix"] = common_prefix;
*j = std::move(x);
return absl::OkStatus();
};
}
constexpr auto BtreeNodeBinder = jb::Object(
jb::Member("height", jb::Projection<&BtreeNode::height>()),
jb::Member("entries",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Variant(
jb::Array(BtreeLeafNodeEntryBinder(obj->key_prefix)),
jb::Array(BtreeInteriorNodeEntryBinder(obj->key_prefix)))(
is_loading, options, &obj->entries, j);
}));
constexpr auto VersionTreeNodeBinder = jb::Object(
jb::Member("height", jb::Projection<&VersionTreeNode::height>()),
jb::Member("version_tree_arity_log2",
jb::Projection<&VersionTreeNode::version_tree_arity_log2>()),
jb::Member("entries", jb::Projection<&VersionTreeNode::entries>(jb::Variant(
jb::Array(BtreeGenerationReferenceBinder),
jb::Array(VersionNodeReferenceBinder)))));
}
// Renders `manifest` as a JSON debugging representation.
::nlohmann::json Dump(const Manifest& manifest) {
  auto json_result = jb::ToJson(manifest, ManifestBinder);
  return json_result.value();
}
// Renders a B+tree node as a JSON debugging representation.
::nlohmann::json Dump(const BtreeNode& node) {
  auto json_result = jb::ToJson(node, BtreeNodeBinder);
  return json_result.value();
}
// Renders a version tree node as a JSON debugging representation.
::nlohmann::json Dump(const VersionTreeNode& node) {
  auto json_result = jb::ToJson(node, VersionTreeNodeBinder);
  return json_result.value();
}
}
} | #include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_ocdbt::BtreeNode;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::DataFileId;
using ::tensorstore::internal_ocdbt::Dump;
using ::tensorstore::internal_ocdbt::IndirectDataKind;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::LabeledIndirectDataReference;
using ::tensorstore::internal_ocdbt::Manifest;
// A "btreenode:" label parses to kind kBtreeNode, with percent-decoded path
// components and numeric offset/length.
TEST(LabeledIndirectDataReferenceTest, ParseBtreeNode) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value,
      LabeledIndirectDataReference::Parse("btreenode:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
// A "value:" label parses to kind kValue.
TEST(LabeledIndirectDataReferenceTest, ParseValue) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse("value:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kValue, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
// A "versionnode:" label parses to kind kVersionNode.
TEST(LabeledIndirectDataReferenceTest, ParseVersionNode) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value,
      LabeledIndirectDataReference::Parse("versionnode:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kVersionNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
// The offset may be as large as int64 max when the length is 0.
TEST(LabeledIndirectDataReferenceTest, MaxOffset) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse(
                      "btreenode:abc:def%20:9223372036854775807:0"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(9223372036854775807, value.location.offset);
  EXPECT_EQ(0, value.location.length);
}
// offset + length may sum to exactly int64 max.
TEST(LabeledIndirectDataReferenceTest, MaxOffsetAndLength) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse(
                      "btreenode:abc:def%20:9223372036854775806:1"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(9223372036854775806, value.location.offset);
  EXPECT_EQ(1, value.location.length);
}
// An offset beyond int64 max is rejected with kDataLoss.
TEST(LabeledIndirectDataReferenceTest, OffsetTooLarge) {
  EXPECT_THAT(
      LabeledIndirectDataReference::Parse(
          "btreenode:abc:def%20:9223372036854775808:0"),
      MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
// An unrecognized kind label is rejected with kInvalidArgument.
TEST(LabeledIndirectDataReferenceTest, InvalidKind) {
  EXPECT_THAT(LabeledIndirectDataReference::Parse("abc:abc:def:0:10"),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Invalid indirect data kind: abc"));
}
// offset + length overflowing int64 is rejected with kDataLoss.
TEST(LabeledIndirectDataReferenceTest, LengthTooLarge) {
  EXPECT_THAT(
      LabeledIndirectDataReference::Parse(
          "btreenode:abc:def%20:9223372036854775807:1"),
      MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
TEST(DumpTest, Manifest) {
Manifest manifest;
manifest.config.uuid = {
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}};
manifest.config.version_tree_arity_log2 = 1;
{
auto& x = manifest.versions.emplace_back();
x.root.location.file_id = {"abc", "def"};
x.root.location.offset = 10;
x.root.location.length = 42;
x.generation_number = 15;
x.root.statistics.num_indirect_value_bytes = 101;
x.root.statistics.num_tree_bytes = 220;
x.root.statistics.num_keys = 8;
x.root_height = 0;
x.commit_time = CommitTime{10};
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 8;
x.height = 3;
x.commit_time = CommitTime{1};
x.num_generations = 8;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 12;
x.height = 2;
x.commit_time = CommitTime{5};
x.num_generations = 4;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 14;
x.height = 1;
x.commit_time = CommitTime{8};
x.num_generations = 2;
}
EXPECT_THAT(Dump(manifest),
MatchesJson({
{"config",
{{"uuid", "000102030405060708090a0b0c0d0e0f"},
{"compression", {{"id", "zstd"}}},
{"max_decoded_node_bytes", 8388608},
{"max_inline_value_bytes", 100},
{"version_tree_arity_log2", 1}}},
{"version_tree_nodes",
{{
{"commit_time", 1},
{"generation_number", 8},
{"height", 3},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 8},
},
{
{"commit_time", 5},
{"generation_number", 12},
{"height", 2},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 4},
},
{
{"commit_time", 8},
{"generation_number", 14},
{"height", 1},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 2},
}}},
{"versions",
{{{"commit_time", 10},
{"root",
{{"location", "btreenode:abc:def:10:42"},
{"statistics",
{{"num_indirect_value_bytes", 101},
{"num_keys", 8},
{"num_tree_bytes", 220}}}}},
{"generation_number", 15},
{"root_height", 0}}}},
}));
}
TEST(DumpTest, BtreeLeafNode) {
BtreeNode node;
node.height = 0;
node.key_prefix = "ab";
auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
entries.push_back({"c",
absl::Cord("value1")});
entries.push_back({"d",
absl::Cord("value2")});
entries.push_back({"e",
IndirectDataReference{{"abc", "def"}, 1, 25}});
EXPECT_THAT(
Dump(node),
MatchesJson({
{"entries",
{
{
{"inline_value",
::nlohmann::json::binary_t{
std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '1'}}},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'c'}}},
},
{
{"inline_value",
::nlohmann::json::binary_t{
std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '2'}}},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'd'}}},
},
{
{"indirect_value", "value:abc:def:1:25"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'e'}}},
},
}},
{"height", 0},
}));
}
TEST(DumpTest, BtreeInteriorNode) {
BtreeNode node;
node.height = 2;
auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
entries.push_back({"abc",
1,
{
{
{"abc", "def"},
5,
6,
},
{
100,
200,
5,
},
}});
entries.push_back({"def",
1,
{
{
{"ghi", "jkl"},
42,
9,
},
{
101,
220,
8,
},
}});
EXPECT_THAT(
Dump(node),
MatchesJson({
{"entries",
{
{{"location", "btreenode:abc:def:5:6"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'c'}}},
{"subtree_common_prefix",
::nlohmann::json::binary_t{std::vector<uint8_t>{'a'}}},
{
"statistics",
{{"num_indirect_value_bytes", 100},
{"num_keys", 5},
{"num_tree_bytes", 200}},
}},
{
{"location", "btreenode:ghi:jkl:42:9"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'd', 'e', 'f'}}},
{"subtree_common_prefix",
::nlohmann::json::binary_t{std::vector<uint8_t>{'d'}}},
{"statistics",
{{"num_indirect_value_bytes", 101},
{"num_keys", 8},
{"num_tree_bytes", 220}}},
},
}},
{"height", 2},
}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/dump.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/dump_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
618654a3-d58e-4f05-82a9-934801a00f6f | cpp | google/tensorstore | data_file_id_codec | tensorstore/kvstore/ocdbt/format/data_file_id_codec.cc | tensorstore/kvstore/ocdbt/format/data_file_id_codec_test.cc | #include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include <algorithm>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/varint/varint_reading.h"
#include "riegeli/varint/varint_writing.h"
#include "tensorstore/internal/ref_counted_string.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
using PathLengthCodec = VarintCodec<PathLength>;
}
void DataFileTableBuilder::Add(const DataFileId& data_file_id) {
data_files_.emplace(data_file_id, 0);
}
bool DataFileTableBuilder::Finalize(riegeli::Writer& writer) {
if (!riegeli::WriteVarint64(data_files_.size(), writer)) return false;
if (data_files_.empty()) return true;
std::vector<DataFileId> sorted_data_files;
sorted_data_files.reserve(data_files_.size());
for (const auto& p : data_files_) {
sorted_data_files.push_back(p.first);
}
std::sort(sorted_data_files.begin(), sorted_data_files.end(),
[&](const DataFileId& a, const DataFileId& b) {
if (int c = std::string_view(a.base_path)
.compare(std::string_view(b.base_path));
c != 0) {
return c < 0;
}
return std::string_view(a.relative_path) <
std::string_view(b.relative_path);
});
std::vector<size_t> prefix_lengths(sorted_data_files.size());
prefix_lengths[0] = 0;
for (size_t i = 1; i < sorted_data_files.size(); ++i) {
auto& cur = sorted_data_files[i];
auto& prev = sorted_data_files[i - 1];
std::string_view prev_base_path = prev.base_path;
std::string_view cur_base_path = cur.base_path;
size_t prefix_length =
FindCommonPrefixLength(prev_base_path, cur_base_path);
if (prev_base_path.size() == cur_base_path.size() &&
cur_base_path.size() == prefix_length) {
prefix_length +=
FindCommonPrefixLength(prev.relative_path, cur.relative_path);
}
prefix_lengths[i] = prefix_length;
if (!PathLengthCodec{}(writer, prefix_length)) return false;
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
assert(data_file.base_path.size() + data_file.relative_path.size() <=
kMaxPathLength);
if (!PathLengthCodec{}(writer, data_file.base_path.size() +
data_file.relative_path.size() -
prefix_lengths[i])) {
return false;
}
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
if (!PathLengthCodec{}(writer, data_file.base_path.size())) {
return false;
}
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
size_t prefix_length = prefix_lengths[i];
std::string_view base_path = data_file.base_path;
size_t base_path_prefix_length = std::min(prefix_length, base_path.size());
if (!writer.Write(base_path.substr(base_path_prefix_length))) return false;
std::string_view relative_path = data_file.relative_path;
if (!writer.Write(
relative_path.substr(prefix_length - base_path_prefix_length))) {
return false;
}
auto it = data_files_.find(data_file);
assert(it != data_files_.end());
it->second = i;
}
return true;
}
size_t DataFileTableBuilder::GetIndex(const DataFileId& data_file_id) const {
return data_files_.at(data_file_id);
}
void DataFileTableBuilder::Clear() { data_files_.clear(); }
[[nodiscard]] bool ReadDataFileTable(riegeli::Reader& reader,
const BasePath& transitive_path,
DataFileTable& value) {
ABSL_CHECK_LE(transitive_path.size(), kMaxPathLength);
std::string_view transitive_path_sv = transitive_path;
const size_t max_path_length = kMaxPathLength - transitive_path_sv.size();
uint64_t num_files;
if (!riegeli::ReadVarint64(reader, num_files)) return false;
std::vector<PathLength> path_length_buffer;
constexpr uint64_t kMaxReserve = 1024;
path_length_buffer.reserve(std::min(kMaxReserve, num_files) * 3);
path_length_buffer.push_back(0);
for (uint64_t i = 1; i < num_files; ++i) {
PathLength prefix_length;
if (!PathLengthCodec{}(reader, prefix_length)) return false;
path_length_buffer.push_back(prefix_length);
}
for (uint64_t i = 0; i < num_files; ++i) {
PathLength suffix_length;
if (!PathLengthCodec{}(reader, suffix_length)) return false;
path_length_buffer.push_back(suffix_length);
}
PathLength prev_base_path_length = 0;
for (uint64_t i = 0; i < num_files; ++i) {
PathLength base_path_length;
if (!PathLengthCodec{}(reader, base_path_length)) return false;
size_t prefix_length = path_length_buffer[i];
size_t suffix_length = path_length_buffer[num_files + i];
size_t path_length = prefix_length + suffix_length;
if (path_length > max_path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"path_length[%d] = prefix_length(%d) + "
"suffix_length(%d) = %d > %d - transitive_length(%d) = %d",
i, prefix_length, suffix_length, path_length, kMaxPathLength,
transitive_path.size(), max_path_length)));
return false;
}
if (base_path_length > path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"base_path_length[%d] = %d > path_length[%d] = %d = "
"prefix_length(%d) + suffix_length(%d)",
i, base_path_length, i, path_length, prefix_length, suffix_length)));
return false;
}
if (prefix_length > std::min(prev_base_path_length, base_path_length) &&
base_path_length != prev_base_path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"path_prefix_length[%d] = %d > "
"min(base_path_length[%d] = %d, base_path_length[%d] = %d) is not "
"valid if "
"base_path_length[%d] != base_path_length[%d]",
i - 1, prefix_length,
i - 1, prev_base_path_length,
i, base_path_length,
i - 1, i)));
return false;
}
path_length_buffer.push_back(base_path_length);
prev_base_path_length = base_path_length;
}
auto& files = value.files;
files.resize(num_files);
size_t prev_relative_path_length = 0;
for (uint64_t i = 0; i < num_files; ++i) {
size_t prefix_length = path_length_buffer[i];
size_t suffix_length = path_length_buffer[num_files + i];
size_t base_path_length = path_length_buffer[2 * num_files + i];
size_t relative_path_length =
prefix_length + suffix_length - base_path_length;
if (!reader.Pull(suffix_length)) return false;
auto& file = files[i];
if (base_path_length == 0) {
file.base_path = transitive_path;
} else if (prefix_length >= base_path_length) {
assert(files[i - 1].base_path.size() ==
base_path_length + transitive_path.size());
file.base_path = files[i - 1].base_path;
prefix_length -= base_path_length;
} else {
internal::RefCountedStringWriter writer(base_path_length +
transitive_path_sv.size());
std::memcpy(writer.data(), transitive_path_sv.data(),
transitive_path_sv.size());
size_t offset = transitive_path_sv.size();
size_t base_suffix_length = base_path_length > prefix_length
? base_path_length - prefix_length
: 0;
if (prefix_length > 0) {
std::string_view prev_base_path = files[i - 1].base_path;
prev_base_path.remove_prefix(transitive_path_sv.size());
size_t base_prefix_length = std::min(prefix_length, base_path_length);
assert(base_prefix_length <= prev_base_path.size());
std::memcpy(writer.data() + offset, prev_base_path.data(),
base_prefix_length);
offset += base_prefix_length;
prefix_length -= base_prefix_length;
}
if (base_suffix_length) {
std::memcpy(writer.data() + offset, reader.cursor(),
base_suffix_length);
reader.move_cursor(base_suffix_length);
suffix_length -= base_suffix_length;
}
file.base_path = std::move(writer);
}
if (relative_path_length == 0) {
assert(suffix_length == 0);
prev_relative_path_length = 0;
continue;
}
if (suffix_length == 0 &&
relative_path_length == prev_relative_path_length) {
assert(file.base_path == files[i - 1].base_path);
file.relative_path = files[i - 1].relative_path;
continue;
}
internal::RefCountedStringWriter writer(relative_path_length);
size_t offset = 0;
if (prefix_length) {
assert(file.base_path == files[i - 1].base_path);
assert(prefix_length <= relative_path_length);
std::memcpy(writer.data(), files[i - 1].relative_path.data(),
prefix_length);
offset += prefix_length;
}
if (suffix_length > 0) {
assert(offset + suffix_length == relative_path_length);
std::memcpy(writer.data() + offset, reader.cursor(), suffix_length);
reader.move_cursor(suffix_length);
}
file.relative_path = std::move(writer);
prev_relative_path_length = relative_path_length;
}
return true;
}
[[nodiscard]] bool DataFileIdCodec<riegeli::Reader>::operator()(
riegeli::Reader& reader, DataFileId& value) const {
uint64_t index;
if (!DataFileIndexCodec{}(reader, index)) return false;
if (index >= data_file_table.files.size()) {
reader.Fail(absl::DataLossError(
absl::StrFormat("Data file id %d is outside range [0, %d)", index,
data_file_table.files.size())));
return false;
}
value = data_file_table.files[index];
return true;
}
}
} | #include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/varint/varint_writing.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::BasePath;
using ::tensorstore::internal_ocdbt::DataFileId;
using ::tensorstore::internal_ocdbt::DataFileTable;
using ::tensorstore::internal_ocdbt::DataFileTableBuilder;
using ::tensorstore::internal_ocdbt::FinalizeReader;
using ::tensorstore::internal_ocdbt::FinalizeWriter;
using ::tensorstore::internal_ocdbt::kMaxPathLength;
using ::tensorstore::internal_ocdbt::ReadDataFileTable;
Result<absl::Cord> Encode(const DataFileTable& table) {
DataFileTableBuilder builder;
for (const auto& file : table.files) {
builder.Add(file);
}
absl::Cord cord;
{
riegeli::CordWriter writer{&cord};
bool success = builder.Finalize(writer);
TENSORSTORE_RETURN_IF_ERROR(FinalizeWriter(writer, success));
}
return cord;
}
Result<DataFileTable> Decode(const absl::Cord& cord,
const BasePath& base_path = {}) {
DataFileTable new_table;
{
riegeli::CordReader reader{&cord};
bool success = ReadDataFileTable(reader, base_path, new_table);
TENSORSTORE_RETURN_IF_ERROR(FinalizeReader(reader, success));
}
return new_table;
}
Result<DataFileTable> RoundTrip(const DataFileTable& table,
const BasePath& base_path = {}) {
TENSORSTORE_ASSIGN_OR_RETURN(auto cord, Encode(table));
return Decode(cord, base_path);
}
TEST(DataFileBuilderTest, Empty) {
DataFileTable table;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
EXPECT_EQ(1, encoded.size());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, Decode(encoded));
EXPECT_EQ(table.files, new_table.files);
}
TEST(DataFileBuilderTest, Simple) {
DataFileTable table;
table.files = {
{"b", "d"}, {"a", "c"}, {"a", "b"}, {"b", "e"}, {"b", "d"},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, ""));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray({
DataFileId{"a", "b"},
DataFileId{"a", "c"},
DataFileId{"b", "d"},
DataFileId{"b", "e"},
}));
}
TEST(DataFileBuilderTest, Prefixes) {
DataFileTable table;
table.files = {
{"", ""},
{"", "a"},
{"", "ab"},
{"", "ac"},
{"a", ""},
{"a", "x"},
{"a", "xy"},
{"a", "xyz"},
{"ab", ""},
{"ab", "xy"},
{"ab", "xyz"},
{"ac", "xy"},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, ""));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray(table.files));
}
TEST(DataFileBuilderTest, AddBasePath) {
DataFileTable table;
table.files = {
{"b", "d"}, {"a", "c"}, {"a", "b"}, {"b", "e"}, {"b", "d"}, {"", "y"},
};
BasePath base_path = "x/";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, base_path));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray({
DataFileId{"x/", "y"},
DataFileId{"x/a", "b"},
DataFileId{"x/a", "c"},
DataFileId{"x/b", "d"},
DataFileId{"x/b", "e"},
}));
EXPECT_EQ(base_path.data(), new_table.files[0].base_path.data());
EXPECT_EQ(new_table.files[1].base_path.data(),
new_table.files[2].base_path.data());
EXPECT_EQ(new_table.files[3].base_path.data(),
new_table.files[4].base_path.data());
}
TEST(DataFileBuilderTest, Truncated) {
DataFileTable table;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
ASSERT_EQ(1, encoded.size());
EXPECT_THAT(Decode(encoded.Subcord(0, 0)),
MatchesStatus(absl::StatusCode::kDataLoss));
}
TEST(DataFileBuilderTest, BasePathTooLongWithPrefix) {
DataFileTable table;
DataFileId long_id{std::string_view(std::string(kMaxPathLength, 'x'))};
table.files = {long_id};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, Decode(encoded));
ASSERT_EQ(table.files, decoded.files);
EXPECT_THAT(Decode(encoded, "z"),
MatchesStatus(absl::StatusCode::kDataLoss, "path_length.*"));
}
TEST(DataFileBuilderTest, SuffixLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(kMaxPathLength + 1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, BasePathLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(5, writer));
ASSERT_TRUE(riegeli::WriteVarint64(65536, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, PrefixLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(kMaxPathLength + 1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, BasePathLongerThanPath) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(5, writer));
ASSERT_TRUE(riegeli::WriteVarint64(6, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded),
MatchesStatus(absl::StatusCode::kDataLoss, "base_path_length.*"));
}
TEST(DataFileBuilderTest, PrefixLengthLongerThanPrevBasePath) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(0, writer));
ASSERT_TRUE(riegeli::WriteVarint64(0, writer));
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"path_prefix_length.*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/data_file_id_codec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/data_file_id_codec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
753e4b82-adbd-4336-a554-86e584815eb5 | cpp | google/tensorstore | btree | tensorstore/kvstore/ocdbt/format/btree.cc | tensorstore/kvstore/ocdbt/format/btree_test.cc | #include "tensorstore/kvstore/ocdbt/format/btree.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/reader.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/kvstore/ocdbt/format/btree_codec.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference_codec.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
bool ReadKeyPrefixLengths(riegeli::Reader& reader,
span<KeyLength> prefix_lengths,
KeyLength& common_prefix_length) {
KeyLength min_prefix_length = kMaxKeyLength;
for (auto& prefix_length : prefix_lengths) {
if (!KeyLengthCodec{}(reader, prefix_length)) return false;
min_prefix_length = std::min(min_prefix_length, prefix_length);
}
common_prefix_length = min_prefix_length;
return true;
}
bool ReadKeySuffixLengths(riegeli::Reader& reader,
span<KeyLength> suffix_lengths) {
for (auto& length : suffix_lengths) {
if (!KeyLengthCodec{}(reader, length)) return false;
}
return true;
}
template <typename Entry>
bool ReadKeys(riegeli::Reader& reader, std::string_view& common_prefix,
BtreeNode::KeyBuffer& key_buffer, span<Entry> entries) {
const size_t num_entries = entries.size();
KeyLength common_prefix_length;
std::vector<KeyLength> key_length_buffer(num_entries * 2);
span<KeyLength> prefix_lengths(key_length_buffer.data(), num_entries);
span<KeyLength> suffix_lengths(key_length_buffer.data() + num_entries,
num_entries);
if (!ReadKeyPrefixLengths(reader, prefix_lengths.subspan(1),
common_prefix_length)) {
return false;
}
if (!ReadKeySuffixLengths(reader, suffix_lengths)) return false;
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
for (auto& entry : entries) {
if (!KeyLengthCodec{}(reader, entry.subtree_common_prefix_length)) {
return false;
}
common_prefix_length =
std::min(common_prefix_length, entry.subtree_common_prefix_length);
}
}
common_prefix_length = std::min(suffix_lengths[0], common_prefix_length);
size_t key_buffer_size = common_prefix_length;
for (size_t i = 0, prev_length = 0; i < num_entries; ++i) {
size_t prefix_length = prefix_lengths[i];
if (prefix_length > prev_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"Child %d: Prefix length of %d exceeds previous key length %d", i,
prefix_length, prev_length)));
return false;
}
size_t suffix_length = suffix_lengths[i];
size_t key_length = prefix_length + suffix_length;
if (key_length > kMaxKeyLength) {
reader.Fail(absl::DataLossError(
absl::StrFormat("Child %d: Key length %d exceeds limit of %d", i,
key_length, kMaxKeyLength)));
return false;
}
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
auto& entry = entries[i];
if (entry.subtree_common_prefix_length > key_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"Key %d: subtree common prefix length of %d exceeds key length of "
"%d",
i, entry.subtree_common_prefix_length, key_length)));
return false;
}
assert(entry.subtree_common_prefix_length >= common_prefix_length);
entry.subtree_common_prefix_length -= common_prefix_length;
}
prev_length = key_length;
key_buffer_size += key_length - common_prefix_length;
}
key_buffer = BtreeNode::KeyBuffer(key_buffer_size);
char* key_buffer_ptr = key_buffer.data.get();
const auto append_key_data = [&](auto... parts) {
std::string_view s(key_buffer_ptr, (parts.size() + ...));
(static_cast<void>(std::memcpy(key_buffer_ptr, parts.data(), parts.size()),
key_buffer_ptr += parts.size()),
...);
return s;
};
{
size_t key_length = suffix_lengths[0];
if (!reader.Pull(key_length)) return false;
auto full_first_key =
append_key_data(std::string_view(reader.cursor(), key_length));
common_prefix = full_first_key.substr(0, common_prefix_length);
entries[0].key = full_first_key.substr(common_prefix_length);
reader.move_cursor(key_length);
}
for (size_t i = 1; i < num_entries; ++i) {
size_t prefix_length = prefix_lengths[i] - common_prefix_length;
size_t suffix_length = suffix_lengths[i];
if (!reader.Pull(suffix_length)) return false;
auto prev_key = std::string_view(entries[i - 1].key);
auto suffix = std::string_view(reader.cursor(), suffix_length);
if (prev_key.substr(prefix_length) >= suffix) {
reader.Fail(absl::DataLossError("Invalid key order"));
return false;
}
entries[i].key = append_key_data(prev_key.substr(0, prefix_length), suffix);
reader.move_cursor(suffix_length);
}
return true;
}
template <typename Entry>
bool ReadBtreeNodeEntries(riegeli::Reader& reader,
const DataFileTable& data_file_table,
uint64_t num_entries, BtreeNode& node) {
auto& entries = node.entries.emplace<std::vector<Entry>>();
entries.resize(num_entries);
if (!ReadKeys<Entry>(reader, node.key_prefix, node.key_buffer, entries)) {
return false;
}
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
return BtreeNodeReferenceArrayCodec{data_file_table,
[](auto& entry) -> decltype(auto) {
return (entry.node);
}}(reader, entries);
} else {
return LeafNodeValueReferenceArrayCodec{data_file_table,
[](auto& entry) -> decltype(auto) {
return (entry.value_reference);
}}(reader, entries);
}
}
}
Result<BtreeNode> DecodeBtreeNode(const absl::Cord& encoded,
const BasePath& base_path) {
BtreeNode node;
auto status = DecodeWithOptionalCompression(
encoded, kBtreeNodeMagic, kBtreeNodeFormatVersion,
[&](riegeli::Reader& reader, uint32_t version) -> bool {
if (!reader.ReadByte(node.height)) return false;
DataFileTable data_file_table;
if (!ReadDataFileTable(reader, base_path, data_file_table)) {
return false;
}
uint32_t num_entries;
if (!ReadVarintChecked(reader, num_entries)) return false;
if (num_entries == 0) {
reader.Fail(absl::DataLossError("Empty b-tree node"));
return false;
}
if (num_entries > kMaxNodeArity) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"B-tree node has arity %d, which exceeds limit of %d",
num_entries, kMaxNodeArity)));
return false;
}
if (node.height == 0) {
return ReadBtreeNodeEntries<LeafNodeEntry>(reader, data_file_table,
num_entries, node);
} else {
return ReadBtreeNodeEntries<InteriorNodeEntry>(
reader, data_file_table, num_entries, node);
}
});
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(status,
"Error decoding b-tree node");
}
#ifndef NDEBUG
CheckBtreeNodeInvariants(node);
#endif
return node;
}
absl::Status ValidateBtreeNodeReference(const BtreeNode& node,
BtreeNodeHeight height,
std::string_view inclusive_min_key) {
if (node.height != height) {
return absl::DataLossError(absl::StrFormat(
"Expected height of %d but received: %d", height, node.height));
}
return std::visit(
[&](auto& entries) {
if (ComparePrefixedKeyToUnprefixedKey{node.key_prefix}(
entries.front().key, inclusive_min_key) < 0) {
return absl::DataLossError(
tensorstore::StrCat("First key ",
tensorstore::QuoteString(tensorstore::StrCat(
node.key_prefix, entries.front().key)),
" is less than inclusive_min ",
tensorstore::QuoteString(inclusive_min_key),
" specified by parent node"));
}
return absl::OkStatus();
},
node.entries);
}
bool operator==(const BtreeNodeStatistics& a, const BtreeNodeStatistics& b) {
return a.num_indirect_value_bytes == b.num_indirect_value_bytes &&
a.num_tree_bytes == b.num_tree_bytes && a.num_keys == b.num_keys;
}
std::ostream& operator<<(std::ostream& os, const BtreeNodeStatistics& x) {
return os << "{num_indirect_value_bytes=" << x.num_indirect_value_bytes
<< ", num_tree_bytes=" << x.num_tree_bytes
<< ", num_keys=" << x.num_keys << "}";
}
BtreeNodeStatistics& BtreeNodeStatistics::operator+=(
const BtreeNodeStatistics& other) {
num_indirect_value_bytes = internal::AddSaturate(
num_indirect_value_bytes, other.num_indirect_value_bytes);
num_tree_bytes = internal::AddSaturate(num_tree_bytes, other.num_tree_bytes);
num_keys = internal::AddSaturate(num_keys, other.num_keys);
return *this;
}
bool operator==(const LeafNodeEntry& a, const LeafNodeEntry& b) {
return a.key == b.key && a.value_reference == b.value_reference;
}
std::ostream& operator<<(std::ostream& os, const LeafNodeValueReference& x) {
if (auto* value = std::get_if<absl::Cord>(&x)) {
return os << tensorstore::QuoteString(std::string(*value));
} else {
return os << std::get<IndirectDataReference>(x);
}
}
std::ostream& operator<<(std::ostream& os, const LeafNodeEntry& e) {
return os << "{key=" << tensorstore::QuoteString(e.key)
<< ", value_reference=" << e.value_reference << "}";
}
bool operator==(const BtreeNodeReference& a, const BtreeNodeReference& b) {
return a.location == b.location && a.statistics == b.statistics;
}
std::ostream& operator<<(std::ostream& os, const BtreeNodeReference& x) {
return os << "{location=" << x.location << ", statistics=" << x.statistics
<< "}";
}
std::ostream& operator<<(std::ostream& os, const InteriorNodeEntry& e) {
return os << "{key=" << tensorstore::QuoteString(e.key)
<< ", subtree_common_prefix_length="
<< e.subtree_common_prefix_length << ", node=" << e.node << "}";
}
const LeafNodeEntry* FindBtreeEntry(span<const LeafNodeEntry> entries,
std::string_view key) {
const LeafNodeEntry* entry = FindBtreeEntryLowerBound(entries, key);
if (entry == entries.data() + entries.size() || entry->key != key) {
return nullptr;
}
return entry;
}
const LeafNodeEntry* FindBtreeEntryLowerBound(span<const LeafNodeEntry> entries,
std::string_view inclusive_min) {
return std::lower_bound(
entries.data(), entries.data() + entries.size(), inclusive_min,
[](const LeafNodeEntry& entry, std::string_view inclusive_min) {
return entry.key < inclusive_min;
});
}
span<const LeafNodeEntry> FindBtreeEntryRange(span<const LeafNodeEntry> entries,
std::string_view inclusive_min,
std::string_view exclusive_max) {
const LeafNodeEntry* lower = FindBtreeEntryLowerBound(entries, inclusive_min);
const LeafNodeEntry* upper = entries.data() + entries.size();
if (!exclusive_max.empty()) {
upper = std::lower_bound(
lower, upper, exclusive_max,
[](const LeafNodeEntry& entry, std::string_view exclusive_max) {
return entry.key < exclusive_max;
});
}
return {lower, upper};
}
const InteriorNodeEntry* FindBtreeEntry(span<const InteriorNodeEntry> entries,
std::string_view key) {
auto it = std::lower_bound(
entries.data(), entries.data() + entries.size(), key,
[](const InteriorNodeEntry& entry, std::string_view inclusive_min) {
return entry.key <= inclusive_min;
});
if (it == entries.data()) {
return nullptr;
}
return it - 1;
}
const InteriorNodeEntry* FindBtreeEntryLowerBound(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min) {
auto it = std::lower_bound(
entries.data(), entries.data() + entries.size(), inclusive_min,
[](const InteriorNodeEntry& entry, std::string_view inclusive_min) {
return entry.key <= inclusive_min;
});
if (it != entries.data()) --it;
return it;
}
span<const InteriorNodeEntry> FindBtreeEntryRange(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min,
std::string_view exclusive_max) {
const InteriorNodeEntry* lower =
FindBtreeEntryLowerBound(entries, inclusive_min);
const InteriorNodeEntry* upper = entries.data() + entries.size();
if (!exclusive_max.empty()) {
upper = std::lower_bound(
lower, upper, exclusive_max,
[](const InteriorNodeEntry& entry, std::string_view exclusive_max) {
return entry.key < exclusive_max;
});
}
return {lower, upper};
}
#ifndef NDEBUG
void CheckBtreeNodeInvariants(const BtreeNode& node) {
if (node.height == 0) {
assert(std::holds_alternative<BtreeNode::LeafNodeEntries>(node.entries));
auto& entries = std::get<BtreeNode::LeafNodeEntries>(node.entries);
assert(!entries.empty());
assert(entries.size() <= kMaxNodeArity);
for (size_t i = 0; i < entries.size(); ++i) {
auto& entry = entries[i];
if (auto* location =
std::get_if<IndirectDataReference>(&entry.value_reference)) {
assert(!location->IsMissing());
}
if (i != 0) {
assert(entry.key > entries[i - 1].key);
}
}
} else {
assert(
std::holds_alternative<BtreeNode::InteriorNodeEntries>(node.entries));
auto& entries = std::get<BtreeNode::InteriorNodeEntries>(node.entries);
assert(!entries.empty());
assert(entries.size() <= kMaxNodeArity);
for (size_t i = 0; i < entries.size(); ++i) {
auto& entry = entries[i];
assert(entry.subtree_common_prefix_length <= entry.key.size());
assert(!entry.node.location.IsMissing());
if (i != 0) {
assert(entry.key > entries[i - 1].key);
}
}
}
}
#endif
}
} | #include "tensorstore/kvstore/ocdbt/format/btree.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <string_view>
#include <type_traits>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/kvstore/ocdbt/format/btree_codec.h"
#include "tensorstore/kvstore/ocdbt/format/btree_node_encoder.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::BtreeNode;
using ::tensorstore::internal_ocdbt::BtreeNodeEncoder;
using ::tensorstore::internal_ocdbt::Config;
using ::tensorstore::internal_ocdbt::DecodeBtreeNode;
using ::tensorstore::internal_ocdbt::EncodedNode;
using ::tensorstore::internal_ocdbt::InteriorNodeEntry;
using ::tensorstore::internal_ocdbt::kMaxNodeArity;
using ::tensorstore::internal_ocdbt::LeafNodeEntry;
Result<std::vector<EncodedNode>> EncodeExistingNode(const Config& config,
const BtreeNode& node) {
return std::visit(
[&](const auto& entries) {
using Entry = typename std::decay_t<decltype(entries)>::value_type;
BtreeNodeEncoder<Entry> encoder(config, node.height,
node.key_prefix);
for (const auto& entry : entries) {
encoder.AddEntry(true, Entry(entry));
}
return encoder.Finalize(false);
},
node.entries);
}
// Encodes `node` with `config`, asserts it fits in a single encoded node,
// decodes it again, and verifies that the key prefix and entries survive the
// round trip unchanged.
void TestBtreeNodeRoundTrip(const Config& config, const BtreeNode& node) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
                                   EncodeExistingNode(config, node));
  ASSERT_EQ(1, encoded_nodes.size());
  auto& encoded_node = encoded_nodes[0];
  // The excluded prefix of the inclusive min key must equal the original
  // node's key prefix.
  EXPECT_EQ(node.key_prefix, encoded_node.info.inclusive_min_key.substr(
                                 0, encoded_node.info.excluded_prefix_length));
  SCOPED_TRACE(tensorstore::StrCat(
      "data=",
      tensorstore::QuoteString(std::string(encoded_node.encoded_node))));
  std::visit(
      [&](const auto& entries) {
        using Entry = typename std::decay_t<decltype(entries)>::value_type;
        TENSORSTORE_ASSERT_OK_AND_ASSIGN(
            auto decoded_node,
            DecodeBtreeNode(encoded_nodes[0].encoded_node, {}));
        // The original prefix is split between the excluded prefix recorded
        // in `info` and the decoded node's own prefix; recombined they must
        // match.
        EXPECT_EQ(node.key_prefix,
                  tensorstore::StrCat(
                      encoded_node.info.inclusive_min_key.substr(
                          0, encoded_node.info.excluded_prefix_length),
                      decoded_node.key_prefix));
        EXPECT_THAT(decoded_node.entries,
                    ::testing::VariantWith<std::vector<Entry>>(entries));
      },
      node.entries);
}
// Round-trips a height-0 (leaf) node with two small inline values and a
// shared key prefix, using no compression.
TEST(BtreeNodeTest, LeafNodeRoundTrip) {
  Config config;
  config.compression = Config::NoCompression{};
  BtreeNode node;
  node.height = 0;
  node.key_prefix = "ab";
  auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
  entries.push_back({"c",
                     absl::Cord("value1")});
  entries.push_back({"d",
                     absl::Cord("value2")});
  TestBtreeNodeRoundTrip(config, node);
}
// Round-trips a height-2 interior node with two child references carrying
// distinct locations and statistics.
TEST(BtreeNodeTest, InteriorNodeRoundTrip) {
  Config config;
  BtreeNode node;
  node.height = 2;
  auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
  {
    InteriorNodeEntry entry;
    entry.key = "abc";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc";
    entry.node.location.file_id.relative_path = "def";
    entry.node.location.offset = 5;
    entry.node.location.length = 6;
    entry.node.statistics.num_indirect_value_bytes = 100;
    entry.node.statistics.num_tree_bytes = 200;
    entry.node.statistics.num_keys = 5;
    entries.push_back(entry);
  }
  {
    InteriorNodeEntry entry;
    entry.key = "def";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc1";
    entry.node.location.file_id.relative_path = "def1";
    entry.node.location.offset = 42;
    entry.node.location.length = 9;
    entry.node.statistics.num_indirect_value_bytes = 101;
    entry.node.statistics.num_tree_bytes = 220;
    entry.node.statistics.num_keys = 8;
    entries.push_back(entry);
  }
  TestBtreeNodeRoundTrip(config, node);
}
// Verifies that decoding with a non-empty base path ("xyz/") prepends that
// base path to every child entry's `file_id.base_path`, for entries that
// share and don't share file ids.
TEST(BtreeNodeTest, InteriorNodeBasePath) {
  Config config;
  BtreeNode node;
  node.height = 2;
  auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
  {
    InteriorNodeEntry entry;
    entry.key = "abc";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc";
    entry.node.location.file_id.relative_path = "def";
    entry.node.location.offset = 5;
    entry.node.location.length = 6;
    entry.node.statistics.num_indirect_value_bytes = 100;
    entry.node.statistics.num_tree_bytes = 200;
    entry.node.statistics.num_keys = 5;
    entries.push_back(entry);
  }
  {
    InteriorNodeEntry entry;
    entry.key = "def";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc1";
    entry.node.location.file_id.relative_path = "def1";
    entry.node.location.offset = 42;
    entry.node.location.length = 9;
    entry.node.statistics.num_indirect_value_bytes = 101;
    entry.node.statistics.num_tree_bytes = 220;
    entry.node.statistics.num_keys = 8;
    entries.push_back(entry);
  }
  {
    InteriorNodeEntry entry;
    entry.key = "ghi";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc1";
    entry.node.location.file_id.relative_path = "def2";
    entry.node.location.offset = 43;
    entry.node.location.length = 10;
    entry.node.statistics.num_indirect_value_bytes = 102;
    entry.node.statistics.num_tree_bytes = 230;
    entry.node.statistics.num_keys = 9;
    entries.push_back(entry);
  }
  {
    InteriorNodeEntry entry;
    entry.key = "jkl";
    entry.subtree_common_prefix_length = 1;
    entry.node.location.file_id.base_path = "abc1";
    entry.node.location.file_id.relative_path = "def1";
    entry.node.location.offset = 43;
    entry.node.location.length = 10;
    entry.node.statistics.num_indirect_value_bytes = 102;
    entry.node.statistics.num_tree_bytes = 230;
    entry.node.statistics.num_keys = 9;
    entries.push_back(entry);
  }
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
                                   EncodeExistingNode(config, node));
  ASSERT_EQ(1, encoded_nodes.size());
  auto& encoded_node = encoded_nodes[0];
  EXPECT_EQ(node.key_prefix, encoded_node.info.inclusive_min_key.substr(
                                 0, encoded_node.info.excluded_prefix_length));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto decoded_node,
      DecodeBtreeNode(encoded_nodes[0].encoded_node, "xyz/"));
  // Decoding with base path "xyz/" should yield the same entries with the
  // base path prepended; patch the expectations accordingly.
  entries[0].node.location.file_id.base_path = "xyz/abc";
  entries[1].node.location.file_id.base_path = "xyz/abc1";
  entries[2].node.location.file_id.base_path = "xyz/abc1";
  entries[3].node.location.file_id.base_path = "xyz/abc1";
  EXPECT_THAT(decoded_node.entries,
              ::testing::VariantWith<std::vector<InteriorNodeEntry>>(entries));
}
// Wraps raw bytes `data` in a valid (uncompressed) b-tree node envelope:
// magic number, format version, length, and checksum, with `data` as the
// body.  Used to construct deliberately malformed node payloads.
absl::Cord EncodeRawBtree(const std::vector<unsigned char>& data) {
  using ::tensorstore::internal_ocdbt::kBtreeNodeFormatVersion;
  using ::tensorstore::internal_ocdbt::kBtreeNodeMagic;
  Config config;
  config.compression = Config::NoCompression{};
  return EncodeWithOptionalCompression(
             config, kBtreeNodeMagic, kBtreeNodeFormatVersion,
             [&](riegeli::Writer& writer) -> bool {
               return writer.Write(std::string_view(
                   reinterpret_cast<const char*>(data.data()), data.size()));
             })
      .value();
}
// Encodes `data` as a raw b-tree node body and returns the status of
// attempting to decode it (empty base path).
absl::Status RoundTripRawBtree(const std::vector<unsigned char>& data) {
  auto decode_result = DecodeBtreeNode(EncodeRawBtree(data), {});
  return decode_result.status();
}
// An empty body (no height byte at all) must be rejected as truncated.
TEST(BtreeNodeTest, CorruptTruncateBodyZeroSize) {
  EXPECT_THAT(
      RoundTripRawBtree({}),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error decoding b-tree node: Unexpected end of data; .*"));
}
// A leaf node (height byte 0) whose entry count is missing must be rejected.
TEST(BtreeNodeTest, CorruptLeafTruncatedNumEntries) {
  EXPECT_THAT(
      RoundTripRawBtree({
          0,
      }),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error decoding b-tree node: Unexpected end of data; .*"));
}
// A leaf node declaring zero entries is invalid: empty nodes are never
// written.
TEST(BtreeNodeTest, CorruptLeafZeroNumEntries) {
  EXPECT_THAT(
      RoundTripRawBtree({
          0,
          0,
          0,
          0,
      }),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error decoding b-tree node: Empty b-tree node; .*"));
}
// Same as above but for an interior node (height byte 1).
TEST(BtreeNodeTest, CorruptInteriorZeroNumEntries) {
  EXPECT_THAT(
      RoundTripRawBtree({
          1,
          0,
          0,
          0,
      }),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    "Error decoding b-tree node: Empty b-tree node; .*"));
}
// Verifies node-splitting at the arity limit: exactly kMaxNodeArity entries
// encode as a single node, while one more forces a split into two roughly
// equal halves.
TEST(BtreeNodeTest, MaxArity) {
  Config config;
  config.compression = Config::NoCompression{};
  config.max_decoded_node_bytes = 1000000000;
  BtreeNode node;
  node.height = 0;
  auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
  std::vector<std::string> keys;
  for (size_t i = 0; i <= kMaxNodeArity; ++i) {
    keys.push_back(absl::StrFormat("%07d", i));
  }
  // Zero-padded keys already sort lexicographically; sort is a safeguard.
  std::sort(keys.begin(), keys.end());
  const auto add_entry = [&](size_t i) {
    entries.push_back({keys[i],
                       absl::Cord()});
  };
  for (size_t i = 0; i < kMaxNodeArity; ++i) {
    add_entry(i);
  }
  // At exactly the maximum arity, a single encoded node round-trips.
  TestBtreeNodeRoundTrip(config, node);
  add_entry(kMaxNodeArity);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
                                   EncodeExistingNode(config, node));
  ASSERT_EQ(2, encoded_nodes.size());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto decoded_node1,
      DecodeBtreeNode(encoded_nodes[0].encoded_node, {}));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto decoded_node2,
      DecodeBtreeNode(encoded_nodes[1].encoded_node, {}));
  // kMaxNodeArity + 1 entries split as (N/2 + 1, N/2).
  EXPECT_EQ(kMaxNodeArity / 2 + 1,
            std::get<BtreeNode::LeafNodeEntries>(decoded_node1.entries).size());
  EXPECT_EQ(kMaxNodeArity / 2,
            std::get<BtreeNode::LeafNodeEntries>(decoded_node2.entries).size());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/btree.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/btree_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
804e3b77-9552-4354-8330-c9484f9ebfcd | cpp | google/tensorstore | manifest | tensorstore/kvstore/ocdbt/format/manifest.cc | tensorstore/kvstore/ocdbt/format/manifest_test.cc | #include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include <cassert>
#include <ostream>
#include <string>
#include <string_view>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/config_codec.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree_codec.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
// Magic number and on-disk format version identifying encoded manifest files.
constexpr uint32_t kManifestMagic = 0x0cdb3a2a;
constexpr uint8_t kManifestFormatVersion = 0;
// Invokes `callback` once per version-tree node reference that a manifest
// with latest generation `generation_number` must contain, from height 1
// upward.  Each call receives the inclusive [min, max] generation range the
// node at that height covers.
void ForEachManifestVersionTreeNodeRef(
    GenerationNumber generation_number, uint8_t version_tree_arity_log2,
    absl::FunctionRef<void(GenerationNumber min_generation_number,
                           GenerationNumber max_generation_number,
                           VersionTreeHeight height)>
        callback) {
  // Round `generation_number - 1` down to a multiple of the arity
  // (2^version_tree_arity_log2).  Generations above this boundary are the
  // ones held directly by the manifest's leaf entries, so nodes only cover
  // generations up to it.
  generation_number = (generation_number - 1) >> version_tree_arity_log2
                                              << version_tree_arity_log2;
  VersionTreeHeight height = 1;
  while (generation_number) {
    // Round down to a multiple of arity^(height+1): everything below that
    // boundary belongs to nodes of greater height.
    GenerationNumber next_generation_number =
        (generation_number - 1)
        >> (height + 1) * version_tree_arity_log2
        << (height + 1) * version_tree_arity_log2;
    GenerationNumber min_generation_number = next_generation_number + 1;
    callback(min_generation_number, generation_number, height);
    ++height;
    generation_number = next_generation_number;
  }
}
// Validates the version-tree node references stored in a manifest whose
// latest generation is `last_generation_number`.  Entries must have heights
// in [1, max], non-zero strictly-increasing generation numbers, strictly
// decreasing heights, and each entry's generation must fall in the range
// expected for its height (per ForEachManifestVersionTreeNodeRef).
absl::Status ValidateManifestVersionTreeNodes(
    VersionTreeArityLog2 version_tree_arity_log2,
    GenerationNumber last_generation_number,
    const std::vector<VersionNodeReference>& entries) {
  const auto max_height = GetMaxVersionTreeHeight(version_tree_arity_log2);
  // First pass: purely local/pairwise invariants.
  for (size_t i = 0; i < entries.size(); ++i) {
    auto& entry = entries[i];
    if (entry.height == 0 || entry.height > max_height) {
      return absl::DataLossError(absl::StrFormat(
          "entry_height[%d] outside valid range [1, %d]", i, max_height));
    }
    if (entry.generation_number == 0) {
      return absl::DataLossError(
          absl::StrFormat("generation_number[%d] must be non-zero", i));
    }
    if (i > 0) {
      if (entry.generation_number <= entries[i - 1].generation_number) {
        return absl::DataLossError(absl::StrFormat(
            "generation_number[%d]=%d <= generation_number[%d]=%d", i,
            entry.generation_number, i - 1, entries[i - 1].generation_number));
      }
      if (entry.height >= entries[i - 1].height) {
        return absl::DataLossError(
            absl::StrFormat("entry_height[%d]=%d >= entry_height[%d]=%d", i,
                            entry.height, i - 1, entries[i - 1].height));
      }
    }
  }
  // Second pass: walk the expected per-height ranges from the end of the
  // entry list (entries are ordered by increasing generation, decreasing
  // height; the expected ranges are generated lowest-height first).
  size_t i = entries.size();
  absl::Status status;
  ForEachManifestVersionTreeNodeRef(
      last_generation_number, version_tree_arity_log2,
      [&](GenerationNumber min_generation_number,
          GenerationNumber max_generation_number, VersionTreeHeight height) {
        if (!status.ok()) {
          // Already failed on an earlier height.
          return;
        }
        if (i == 0) {
          // All entries consumed; remaining heights are simply absent.
          return;
        }
        auto& entry = entries[i - 1];
        if (entry.height != height) {
          // No entry for this height; that is allowed.
          return;
        }
        --i;
        if (entry.generation_number < min_generation_number ||
            entry.generation_number > max_generation_number) {
          status = absl::DataLossError(
              absl::StrFormat("generation_number[%d]=%d is outside expected "
                              "range [%d, %d] for height %d",
                              i, entry.generation_number, min_generation_number,
                              max_generation_number, entry.height));
        }
      });
  if (!status.ok()) return status;
  // Any entry not matched to an expected height is spurious.
  if (i != 0) {
    return absl::DataLossError(
        absl::StrFormat("Unexpected child with generation_number[%d]=%d and "
                        "entry_height[%d]=%d given last generation_number=%d",
                        i - 1, entries[i - 1].generation_number, i - 1,
                        entries[i - 1].height, last_generation_number));
  }
  return absl::OkStatus();
}
// Reads and validates the manifest's version-tree node references from
// `reader`.  On validation failure the error is recorded on `reader` and
// false is returned (riegeli reader convention).
bool ReadManifestVersionTreeNodes(
    riegeli::Reader& reader, VersionTreeArityLog2 version_tree_arity_log2,
    const DataFileTable& data_file_table,
    std::vector<VersionNodeReference>& version_tree_nodes,
    GenerationNumber last_generation_number) {
  // At most one reference per possible height.
  const size_t max_num_entries =
      GetMaxVersionTreeHeight(version_tree_arity_log2);
  if (!VersionTreeInteriorNodeEntryArrayCodec<DataFileTable>{
          data_file_table, max_num_entries, true}(
          reader, version_tree_nodes)) {
    return false;
  }
  TENSORSTORE_RETURN_IF_ERROR(
      ValidateManifestVersionTreeNodes(
          version_tree_arity_log2, last_generation_number, version_tree_nodes),
      reader.Fail(_), false);
  return true;
}
// Serializes `manifest` to its on-disk representation.  If `encode_as_single`
// is set, the config is written with `manifest_kind` forced to kSingle (used
// for numbered manifests); otherwise a non-kSingle manifest encodes only its
// config.
Result<absl::Cord> EncodeManifest(const Manifest& manifest,
                                  bool encode_as_single) {
#ifndef NDEBUG
  CheckManifestInvariants(manifest, encode_as_single);
#endif
  return EncodeWithOptionalCompression(
      manifest.config, kManifestMagic, kManifestFormatVersion,
      [&](riegeli::Writer& writer) -> bool {
        if (encode_as_single) {
          // Force the written config to declare itself a "single" manifest.
          Config new_config = manifest.config;
          new_config.manifest_kind = ManifestKind::kSingle;
          if (!ConfigCodec{}(writer, new_config)) return false;
        } else {
          if (!ConfigCodec{}(writer, manifest.config)) return false;
          if (manifest.config.manifest_kind != ManifestKind::kSingle) {
            // Non-single manifests contain only the config.
            return true;
          }
        }
        // Build the table of data files referenced by versions and nodes,
        // then write versions followed by version-tree node references.
        DataFileTableBuilder data_file_table;
        internal_ocdbt::AddDataFiles(data_file_table, manifest.versions);
        internal_ocdbt::AddDataFiles(data_file_table,
                                     manifest.version_tree_nodes);
        if (!data_file_table.Finalize(writer)) return false;
        if (!WriteVersionTreeNodeEntries(manifest.config, writer,
                                         data_file_table, manifest.versions)) {
          return false;
        }
        if (!VersionTreeInteriorNodeEntryArrayCodec<DataFileTableBuilder>{
                data_file_table,
                GetMaxVersionTreeHeight(
                    manifest.config.version_tree_arity_log2),
                true}(writer, manifest.version_tree_nodes)) {
          return false;
        }
        return true;
      });
}
// Parses an encoded manifest, validating structure and invariants.  The
// inverse of `EncodeManifest`.
Result<Manifest> DecodeManifest(const absl::Cord& encoded) {
  Manifest manifest;
  auto status = DecodeWithOptionalCompression(
      encoded, kManifestMagic, kManifestFormatVersion,
      [&](riegeli::Reader& reader, uint32_t version) -> bool {
        if (!ConfigCodec{}(reader, manifest.config)) return false;
        if (manifest.config.manifest_kind != ManifestKind::kSingle) {
          // Non-single manifests carry only the config.
          return true;
        }
        DataFileTable data_file_table;
        if (!ReadDataFileTable(reader, {}, data_file_table)) {
          return false;
        }
        if (!ReadVersionTreeLeafNode(manifest.config.version_tree_arity_log2,
                                     reader, data_file_table,
                                     manifest.versions)) {
          return false;
        }
        if (!ReadManifestVersionTreeNodes(
                reader, manifest.config.version_tree_arity_log2,
                data_file_table, manifest.version_tree_nodes,
                manifest.versions.back().generation_number)) {
          return false;
        }
        return true;
      });
  if (!status.ok()) {
    return tensorstore::MaybeAnnotateStatus(status, "Error decoding manifest");
  }
#ifndef NDEBUG
  CheckManifestInvariants(manifest);
#endif
  return manifest;
}
// Manifests compare equal iff config, version entries, and version-tree
// node references all match.
bool operator==(const Manifest& a, const Manifest& b) {
  if (!(a.config == b.config)) return false;
  if (!(a.versions == b.versions)) return false;
  return a.version_tree_nodes == b.version_tree_nodes;
}
// Debug-prints a manifest; versions/nodes are only shown for single
// manifests, matching what is actually encoded.
std::ostream& operator<<(std::ostream& os, const Manifest& e) {
  os << "{config=" << e.config;
  if (e.config.manifest_kind == ManifestKind::kSingle) {
    os << ", versions=" << tensorstore::span(e.versions)
       << ", version_tree_nodes=" << tensorstore::span(e.version_tree_nodes);
  }
  return os << "}";
}
// Returns the path of the (unnumbered) manifest file under `base_path`.
std::string GetManifestPath(std::string_view base_path) {
  std::string manifest_path =
      tensorstore::StrCat(base_path, "manifest.ocdbt");
  return manifest_path;
}
// Returns the path of a numbered manifest: "manifest." followed by the
// generation number as 16 zero-padded lowercase hex digits.
std::string GetNumberedManifestPath(std::string_view base_path,
                                    GenerationNumber generation_number) {
  return absl::StrFormat("%smanifest.%016x", base_path, generation_number);
}
#ifndef NDEBUG
// Debug-only sanity check of a manifest's invariants.  `assume_single`
// forces validation of the version entries even if the config does not
// declare the manifest kSingle (used when encoding a numbered manifest).
void CheckManifestInvariants(const Manifest& manifest, bool assume_single) {
  assert(manifest.config.version_tree_arity_log2 > 0);
  assert(manifest.config.version_tree_arity_log2 <= kMaxVersionTreeArityLog2);
  if (manifest.config.manifest_kind == ManifestKind::kSingle || assume_single) {
    TENSORSTORE_CHECK_OK(ValidateVersionTreeLeafNodeEntries(
        manifest.config.version_tree_arity_log2, manifest.versions));
    TENSORSTORE_CHECK_OK(ValidateManifestVersionTreeNodes(
        manifest.config.version_tree_arity_log2,
        manifest.versions.back().generation_number,
        manifest.version_tree_nodes));
  } else {
    // Config-only manifests must not carry version data.
    assert(manifest.versions.empty());
    assert(manifest.version_tree_nodes.empty());
  }
}
#endif
}
} | #include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::DecodeManifest;
using ::tensorstore::internal_ocdbt::Manifest;
// Converts `time` to a CommitTime and back, propagating any conversion
// error (e.g. out-of-range times).
Result<absl::Time> RoundTripCommitTime(absl::Time time) {
  TENSORSTORE_ASSIGN_OR_RETURN(auto commit_time,
                               CommitTime::FromAbslTime(time));
  absl::Time round_tripped = static_cast<absl::Time>(commit_time);
  return round_tripped;
}
// CommitTime round-trips the representable range [0, int64 max] nanoseconds
// since the Unix epoch and rejects values outside it.
TEST(CommitTimeTest, Simple) {
  EXPECT_THAT(RoundTripCommitTime(absl::FromUnixNanos(0)),
              ::testing::Optional(absl::FromUnixNanos(0)));
  EXPECT_THAT(RoundTripCommitTime(absl::FromUnixNanos(-1)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(RoundTripCommitTime(
                  absl::FromUnixNanos(std::numeric_limits<int64_t>::max())),
              ::testing::Optional(
                  absl::FromUnixNanos(std::numeric_limits<int64_t>::max())));
  EXPECT_THAT(RoundTripCommitTime(
                  absl::FromUnixNanos(std::numeric_limits<int64_t>::max()) +
                  absl::Nanoseconds(1)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
void TestManifestRoundTrip(const Manifest& manifest) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeManifest(manifest));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, DecodeManifest(encoded));
EXPECT_EQ(manifest, decoded);
}
// Builds a minimal single-version manifest used by several tests below.
Manifest GetSimpleManifest() {
  Manifest manifest;
  auto& x = manifest.versions.emplace_back();
  x.root.location.file_id.base_path = "abc";
  x.root.location.file_id.relative_path = "defgh";
  x.root.location.offset = 10;
  x.root.location.length = 42;
  x.generation_number = 1;
  x.root.statistics.num_indirect_value_bytes = 101;
  x.root.statistics.num_tree_bytes = 220;
  x.root.statistics.num_keys = 8;
  x.root_height = 0;
  x.commit_time = CommitTime{1};
  return manifest;
}
TEST(ManifestTest, RoundTrip) { TestManifestRoundTrip(GetSimpleManifest()); }
// Same as RoundTrip but with a non-zero root height.
TEST(ManifestTest, RoundTripNonZeroHeight) {
  Manifest manifest;
  {
    auto& x = manifest.versions.emplace_back();
    x.root.location.file_id.base_path = "abc";
    x.root.location.file_id.relative_path = "defgh";
    x.root.location.offset = 10;
    x.root.location.length = 42;
    x.generation_number = 1;
    x.root.statistics.num_indirect_value_bytes = 101;
    x.root.statistics.num_tree_bytes = 220;
    x.root.statistics.num_keys = 8;
    x.root_height = 5;
    x.commit_time = CommitTime{1};
  }
  TestManifestRoundTrip(manifest);
}
// Replacing the 4-byte magic number must be detected as data loss.
TEST(ManifestTest, CorruptMagic) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                   EncodeManifest(GetSimpleManifest()));
  absl::Cord corrupt = encoded;
  corrupt.RemovePrefix(4);
  corrupt.Prepend("abcd");
  EXPECT_THAT(DecodeManifest(corrupt),
              MatchesStatus(
                  absl::StatusCode::kDataLoss,
                  ".*: Expected to start with hex bytes .* but received: .*"));
}
// Appending an extra byte makes the header length field inconsistent.
TEST(ManifestTest, CorruptLength) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                   EncodeManifest(GetSimpleManifest()));
  auto corrupt = encoded;
  corrupt.Append("x");
  EXPECT_THAT(
      DecodeManifest(corrupt),
      MatchesStatus(absl::StatusCode::kDataLoss, ".*: Length in header .*"));
}
// Bumping the format-version byte (offset 12) beyond the supported maximum
// (0) must be rejected.
TEST(ManifestTest, InvalidVersion) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                   EncodeManifest(GetSimpleManifest()));
  auto corrupt = encoded.Subcord(0, 12);
  corrupt.Append(std::string(1, 1));
  corrupt.Append(encoded.Subcord(13, -1));
  EXPECT_THAT(
      DecodeManifest(corrupt),
      MatchesStatus(absl::StatusCode::kDataLoss,
                    ".*: Maximum supported version is 0 but received: 1.*"));
}
// Flipping the final byte (part of the trailing CRC-32C) must fail checksum
// verification.
TEST(ManifestTest, CorruptChecksum) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                   EncodeManifest(GetSimpleManifest()));
  auto corrupt = encoded;
  auto sv = corrupt.Flatten();
  unsigned char final_char = sv.back();
  ++final_char;
  corrupt.RemoveSuffix(1);
  corrupt.Append(std::string(1, final_char));
  EXPECT_THAT(DecodeManifest(corrupt),
              MatchesStatus(absl::StatusCode::kDataLoss,
                            ".*: CRC-32C checksum verification failed.*"));
}
// Round-trips a manifest that, with arity 2 (log2 = 1) and generation 15,
// carries version-tree node references at heights 3, 2, and 1 in addition
// to its inline version entry.
TEST(ManifestTest, RoundTripMultipleVersions) {
  Manifest manifest;
  manifest.config.version_tree_arity_log2 = 1;
  {
    auto& x = manifest.versions.emplace_back();
    x.root.location.file_id.base_path = "abc";
    x.root.location.file_id.relative_path = "defgh";
    x.root.location.offset = 10;
    x.root.location.length = 42;
    x.generation_number = 15;
    x.root.statistics.num_indirect_value_bytes = 101;
    x.root.statistics.num_tree_bytes = 220;
    x.root.statistics.num_keys = 8;
    x.root_height = 0;
    x.commit_time = CommitTime{10};
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id.base_path = "abc";
    x.location.file_id.relative_path = "defgh";
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 8;
    x.height = 3;
    x.commit_time = CommitTime{1};
    x.num_generations = 8;
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id.base_path = "abc";
    x.location.file_id.relative_path = "defgh1";
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 12;
    x.height = 2;
    x.commit_time = CommitTime{5};
    x.num_generations = 4;
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id.base_path = "abc1";
    x.location.file_id.relative_path = "defgh";
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 14;
    x.height = 1;
    x.commit_time = CommitTime{8};
    x.num_generations = 2;
  }
  TestManifestRoundTrip(manifest);
}
namespace for_each_manifest_version_tree_node_ref {
using ::tensorstore::internal_ocdbt::ForEachManifestVersionTreeNodeRef;
using ::tensorstore::internal_ocdbt::GenerationNumber;
using ::tensorstore::internal_ocdbt::VersionTreeArityLog2;
using R = std::tuple<GenerationNumber, GenerationNumber, int>;
// Collects the (min, max, height) tuples emitted by
// ForEachManifestVersionTreeNodeRef for the given inputs.
std::vector<R> GetRanges(GenerationNumber generation_number,
                         VersionTreeArityLog2 version_tree_arity_log2) {
  std::vector<R> results;
  ForEachManifestVersionTreeNodeRef(
      generation_number, version_tree_arity_log2,
      [&](GenerationNumber min_generation_number,
          GenerationNumber max_generation_number, VersionTreeArityLog2 height) {
        results.emplace_back(min_generation_number, max_generation_number,
                             height);
      });
  return results;
}
// Spot-checks hand-computed range sequences for a few generation/arity
// combinations.
TEST(ForEachManifestVersionTreeNodeRefTest, SimpleCases) {
  EXPECT_THAT(GetRanges(8, 2), ::testing::ElementsAre(R{1, 4, 1}));
  EXPECT_THAT(GetRanges(9, 2), ::testing::ElementsAre(R{1, 8, 1}));
  EXPECT_THAT(GetRanges(17, 2), ::testing::ElementsAre(R{1, 16, 1}));
  EXPECT_THAT(GetRanges(30, 2),
              ::testing::ElementsAre(R{17, 28, 1}, R{1, 16, 2}));
  EXPECT_THAT(GetRanges(43, 2),
              ::testing::ElementsAre(R{33, 40, 1}, R{1, 32, 2}));
  EXPECT_THAT(GetRanges(17, 1),
              ::testing::ElementsAre(R{13, 16, 1}, R{9, 12, 2}, R{1, 8, 3}));
}
// Parameterized over (generation_number, version_tree_arity_log2).
class ForEachManifestVersionTreeNodeRefPropertyTest
    : public ::testing::TestWithParam<std::tuple<GenerationNumber, int>> {};
// Property-based check of the emitted ranges: heights are consecutive from 1,
// ranges are non-empty, below the current generation, aligned to the arity
// for their height, and adjacent ranges tile the generation space.
TEST_P(ForEachManifestVersionTreeNodeRefPropertyTest, Properties) {
  auto [generation_number, version_tree_arity_log2] = GetParam();
  auto range = GetRanges(generation_number, version_tree_arity_log2);
  SCOPED_TRACE(
      absl::StrFormat("generation_number=%d, version_tree_arity_log2=%d",
                      generation_number, version_tree_arity_log2));
  SCOPED_TRACE(::testing::PrintToString(range));
  for (size_t i = 0; i < range.size(); ++i) {
    auto [min_gen, max_gen, height] = range[i];
    SCOPED_TRACE(
        absl::StrFormat("i=%d,height=%d,min_generation=%d,max_generation=%d", i,
                        height, min_gen, max_gen));
    // Heights are emitted consecutively starting from 1.
    EXPECT_EQ(height, i + 1);
    EXPECT_LT(max_gen, generation_number);
    EXPECT_GT(max_gen, 0);
    EXPECT_GT(min_gen, 0);
    EXPECT_LT(min_gen, max_gen);
    // The upper bound is aligned to arity^height.
    EXPECT_EQ(
        0, max_gen % (GenerationNumber(1) << height * version_tree_arity_log2));
    if (i == 0) {
      // The first range ends within one arity of the current generation.
      EXPECT_GE(max_gen + (GenerationNumber(1) << version_tree_arity_log2),
                generation_number);
    }
    if (i > 0) {
      // Successive ranges are contiguous (previous min = current max + 1).
      auto [prev_min_gen, prev_max_gen, prev_height] = range[i - 1];
      EXPECT_EQ(prev_min_gen, max_gen + 1);
    }
  }
}
// Names each parameterized test case "<generation>_<arity_log2>".
std::string PrintPropertyTestValue(
    const ::testing::TestParamInfo<std::tuple<GenerationNumber, int>>& info) {
  const auto [generation_number, version_tree_arity_log2] = info.param;
  return absl::StrFormat("%d_%d", generation_number, version_tree_arity_log2);
}
// Cross product of several generation numbers and arities.
INSTANTIATE_TEST_SUITE_P(
    Combinations, ForEachManifestVersionTreeNodeRefPropertyTest,
    ::testing::Combine(::testing::ValuesIn<GenerationNumber>({
                           1,
                           2,
                           101,
                           12345,
                           567890,
                       }),
                       ::testing::ValuesIn<int>({
                           1,
                           2,
                           3,
                           4,
                       })),
    PrintPropertyTestValue);
// The exact pairs exercised by SimpleCases above.
INSTANTIATE_TEST_SUITE_P(
    Simple, ForEachManifestVersionTreeNodeRefPropertyTest,
    (::testing::ValuesIn<std::tuple<GenerationNumber, int>>({
        {8, 2},
        {9, 2},
        {17, 2},
        {43, 2},
        {17, 1},
    })),
    PrintPropertyTestValue);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/manifest.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/manifest_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bd4944f1-a2be-4199-8289-233b06c073b9 | cpp | google/tensorstore | gcs_testbench | tensorstore/kvstore/gcs/gcs_testbench.cc | tensorstore/kvstore/gcs_http/gcs_testbench_test.cc | #include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/status.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/os/subprocess.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "google/storage/v2/storage.grpc.pb.h"
#include "google/storage/v2/storage.pb.h"
ABSL_FLAG(std::string, testbench_binary, "",
"Path to the gcs storage-testbench rest_server");
namespace gcs_testbench {
using ::google::storage::v2::Storage;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::Subprocess;
using ::tensorstore::internal::SubprocessOptions;
using ::tensorstore::internal_http::GetDefaultHttpTransport;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::TryPickUnusedPort;
StorageTestbench::StorageTestbench() = default;
// Address of the testbench's HTTP (REST) frontend.
std::string StorageTestbench::http_address() {
  return absl::StrFormat("localhost:%d", http_port);
}
// Address of the testbench's gRPC frontend; `grpc_port` is only populated
// after SpawnProcess() succeeds.
std::string StorageTestbench::grpc_address() {
  return absl::StrFormat("localhost:%d", grpc_port);
}
void StorageTestbench::SpawnProcess() {
if (running) return;
const auto start_child = [&] {
http_port = TryPickUnusedPort().value_or(0);
ABSL_CHECK(http_port > 0);
ABSL_LOG(INFO) << "Spawning testbench: http:
{
SubprocessOptions options{absl::GetFlag(FLAGS_testbench_binary),
{absl::StrFormat("--port=%d", http_port)}};
TENSORSTORE_CHECK_OK_AND_ASSIGN(child, SpawnSubprocess(options));
}
};
start_child();
for (auto deadline = absl::Now() + absl::Seconds(30);;) {
absl::SleepFor(absl::Milliseconds(200));
if (!absl::IsUnavailable(child->Join(false).status())) {
start_child();
}
auto result =
GetDefaultHttpTransport()
->IssueRequest(
HttpRequestBuilder(
"GET", absl::StrFormat("http:
http_port))
.BuildRequest(),
IssueRequestOptions()
.SetRequestTimeout(absl::Seconds(15))
.SetConnectTimeout(absl::Seconds(15)))
.result();
if (result.ok()) {
if (result->status_code != 200) {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << *result;
} else if (!absl::SimpleAtoi(result->payload.Flatten(), &grpc_port)) {
ABSL_LOG(ERROR) << "Unexpected response from start_grpc: " << *result;
} else {
break;
}
} else {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << result.status();
}
if (absl::Now() < deadline && absl::IsUnavailable(result.status())) {
continue;
}
ABSL_LOG(FATAL) << "Failed to start testbench: " << result.status();
}
running = true;
}
// Kills the testbench subprocess (if any) and reaps it, logging join
// failures rather than propagating them from a destructor.
StorageTestbench::~StorageTestbench() {
  if (child) {
    child->Kill().IgnoreError();
    auto join_result = child->Join();
    if (!join_result.ok()) {
      ABSL_LOG(ERROR) << "Joining storage_testbench subprocess failed: "
                      << join_result.status();
    }
  }
}
// Creates `bucket` on the testbench's gRPC endpoint with public read/write
// ACLs, returning the (absl-converted) gRPC status.
absl::Status StorageTestbench::CreateBucket(std::string grpc_endpoint,
                                            std::string bucket) {
  google::storage::v2::CreateBucketRequest bucket_request =
      tensorstore::ParseTextProtoOrDie(R"pb(
        parent: 'projects/12345'
        bucket: { location: 'US' storage_class: 'STANDARD' }
        bucket_id: 'bucket'
        predefined_acl: 'publicReadWrite'
        predefined_default_object_acl: 'publicReadWrite'
      )pb");
  bucket_request.set_bucket_id(bucket);
  google::storage::v2::Bucket bucket_response;
  // Plaintext channel: the local testbench has no TLS.
  std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
      grpc_endpoint, grpc::InsecureChannelCredentials());
  if (!channel->WaitForConnected(
          absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
    // Proceed anyway; the RPC below will surface any connection error.
    ABSL_LOG(WARNING) << "Failed to connect to grpc endpoint after 100ms: "
                      << grpc_endpoint;
  }
  auto stub = Storage::NewStub(std::move(channel));
  grpc::ClientContext client_context;
  grpc::Status status =
      stub->CreateBucket(&client_context, bucket_request, &bucket_response);
  return GrpcStatusToAbslStatus(status);
}
} | #include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <stddef.h>
#include <cstring>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/call_once.h"
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace kvstore = ::tensorstore::kvstore;
using ::gcs_testbench::StorageTestbench;
using ::tensorstore::KvStore;
using ::tensorstore::StorageGeneration;
namespace {
// Lazily spawns a single process-wide testbench, points the GCS driver at it
// via TENSORSTORE_GCS_HTTP_URL / GOOGLE_AUTH_TOKEN_FOR_TESTING, and creates
// the "test_bucket" bucket.  Thread-safe via call_once.
StorageTestbench& GetTestBench() {
  static absl::NoDestructor<StorageTestbench> testbench;
  static absl::once_flag init_once;
  absl::call_once(init_once, [&]() {
    testbench->SpawnProcess();
    // `static` so the pointer handed to SetEnv stays valid for the process.
    static std::string http_address = testbench->http_address();
    ::tensorstore::internal::SetEnv("TENSORSTORE_GCS_HTTP_URL",
                                    http_address.c_str());
    ::tensorstore::internal::SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
    ABSL_LOG(INFO) << "Using " << http_address;
    ABSL_LOG(INFO) << "Creating bucket: "
                   << StorageTestbench::CreateBucket(testbench->grpc_address(),
                                                     "test_bucket");
  });
  return *testbench;
}
// Fixture that opens a "gcs" kvstore against the shared testbench bucket.
class GcsTestbenchTest : public testing::Test {
 public:
  // Opens the "test_bucket" store at `path`, ensuring the testbench is up.
  tensorstore::KvStore OpenStore(std::string path = "") {
    GetTestBench();
    return kvstore::Open(
               {{"driver", "gcs"}, {"bucket", "test_bucket"}, {"path", path}})
        .value();
  }
};
// Runs the generic kvstore conformance suites against the testbench-backed
// "gcs" driver.
TEST_F(GcsTestbenchTest, Basic) {
  auto store = OpenStore();
  tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(GcsTestbenchTest, DeletePrefix) {
  auto store = OpenStore();
  tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST_F(GcsTestbenchTest, DeleteRange) {
  auto store = OpenStore();
  tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeToEnd) {
  auto store = OpenStore();
  tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeFromBeginning) {
  auto store = OpenStore();
  tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
TEST_F(GcsTestbenchTest, List) {
  auto store = OpenStore("list/");
  tensorstore::internal::TestKeyValueStoreList(store);
}
// Issues many writes/reads and then drops their futures while in flight
// (cancelling pending operations); the driver must not crash.
TEST_F(GcsTestbenchTest, CancellationDoesNotCrash) {
  auto store = OpenStore("cancellation/");
  static constexpr size_t kCount = 1000;
  std::vector<std::string> keys;
  keys.reserve(kCount);
  for (size_t i = 0; i < kCount; ++i) {
    keys.push_back(absl::StrCat(i));
  }
  absl::Cord value("xyzzyx");
  std::vector<tensorstore::AnyFuture> futures;
  futures.reserve(kCount * 2);
  for (const auto& key : keys) {
    futures.push_back(kvstore::Write(store, key, value));
  }
  for (const auto& key : keys) {
    futures.push_back(kvstore::Read(store, key));
  }
  // Discard all pending futures, which cancels the in-flight requests.
  futures = {};
  for (const auto& key : keys) {
    futures.push_back(kvstore::Delete(store, key));
  }
  for (auto& future : futures) {
    future.Wait();
  }
}
TEST_F(GcsTestbenchTest, ConcurrentWrites) {
tensorstore::internal::TestConcurrentWritesOptions options;
auto store = OpenStore("concurrent_writes/");
options.get_store = [&] { return store; };
options.num_iterations = 0x3f;
tensorstore::internal::TestConcurrentWrites(options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs/gcs_testbench.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_testbench_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
dd9bc663-fd0f-418e-9cc8-144ac62321b0 | cpp | google/tensorstore | index_vector_or_scalar | tensorstore/index_space/index_vector_or_scalar.cc | tensorstore/index_space/index_vector_or_scalar_test.cc | #include "tensorstore/index_space/index_vector_or_scalar.h"
#include <system_error>
#include "absl/status/status.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
absl::Status CheckIndexVectorSize(IndexVectorOrScalarView indices,
DimensionIndex size) {
if (indices.pointer && indices.size_or_scalar != size)
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", size, ") does not match number of indices (",
indices.size_or_scalar, ")"));
return absl::OkStatus();
}
}
} | #include "tensorstore/index_space/index_vector_or_scalar.h"
#include <cstdint>
#include <system_error>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::Index;
using ::tensorstore::IsIndexVectorOrScalar;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::CheckIndexVectorSize;
using ::tensorstore::internal_index_space::IndexVectorOrScalarView;
static_assert(IsIndexVectorOrScalar<Index>::value == true);
static_assert(IsIndexVectorOrScalar<std::int32_t>::value == true);
static_assert(IsIndexVectorOrScalar<float>::value == false);
static_assert(
std::is_same_v<
typename IsIndexVectorOrScalar<std::int32_t>::normalized_type, Index>);
static_assert(IsIndexVectorOrScalar<std::int32_t>::extent == dynamic_extent);
static_assert(IsIndexVectorOrScalar<std::vector<std::int32_t>>::value == false);
static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::value == true);
static_assert(std::is_same_v<typename IsIndexVectorOrScalar<
const std::vector<Index>>::normalized_type,
span<const Index>>);
static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::extent ==
dynamic_extent);
static_assert(IsIndexVectorOrScalar<span<const Index>>::value == true);
static_assert(
std::is_same_v<typename IsIndexVectorOrScalar<span<Index>>::normalized_type,
span<const Index>>);
static_assert(IsIndexVectorOrScalar<span<const Index>>::extent ==
dynamic_extent);
static_assert(IsIndexVectorOrScalar<span<const Index, 5>>::value == true);
static_assert(std::is_same_v<
typename IsIndexVectorOrScalar<span<Index, 5>>::normalized_type,
span<const Index, 5>>);
static_assert(IsIndexVectorOrScalar<span<Index, 5>>::extent == 5);
TEST(IndexVectorOrScalarTest, Scalar) {
IndexVectorOrScalarView v(5);
EXPECT_EQ(5, v.size_or_scalar);
EXPECT_EQ(nullptr, v.pointer);
EXPECT_EQ(5, v[0]);
EXPECT_EQ(5, v[1]);
EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok());
}
TEST(IndexVectorOrScalarTest, Vector) {
const Index arr[] = {1, 2, 3};
IndexVectorOrScalarView v{span(arr)};
EXPECT_EQ(3, v.size_or_scalar);
EXPECT_EQ(&arr[0], v.pointer);
EXPECT_EQ(1, v[0]);
EXPECT_EQ(2, v[1]);
EXPECT_EQ(3, v[2]);
EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok());
EXPECT_THAT(CheckIndexVectorSize(v, 5),
tensorstore::MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_vector_or_scalar.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_vector_or_scalar_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
08971125-e01f-4a75-81b3-81a1c6420689 | cpp | google/tensorstore | index_transform_builder | tensorstore/index_space/index_transform_builder.cc | tensorstore/index_space/index_transform_builder_test.cc | #include "tensorstore/index_space/index_transform_builder.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/integer_overflow.h"
namespace tensorstore {
namespace internal_index_space {
void InitializeTransformRepForBuilder(TransformRep* data) {
assert(data != nullptr);
const DimensionIndex output_rank = data->output_rank;
span<OutputIndexMap> maps = data->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
map.stride() = 0;
map.offset() = 0;
}
}
absl::Status SetOutputIndexMapsAndValidateTransformRep(
TransformRep* data, span<const OutputIndexMapInitializer> output_index_maps,
IntervalForm interval_form, BuilderFlags flags) {
const DimensionIndex input_rank = data->input_rank;
const DimensionIndex output_rank = data->output_rank;
assert(output_index_maps.size() == output_rank);
span<Index> input_origin = data->input_origin().first(input_rank);
span<Index> input_shape = data->input_shape().first(input_rank);
auto& implicit_lower_bounds = data->implicit_lower_bounds;
auto& implicit_upper_bounds = data->implicit_upper_bounds;
const auto implicit_mask = DimensionSet::UpTo(input_rank);
if ((flags & BuilderFlags::kSetLower) == BuilderFlags::kDefault) {
Index val =
(interval_form == IntervalForm::sized) &&
((flags & BuilderFlags::kSetUpper) == BuilderFlags::kSetUpper)
? 0
: -kInfIndex;
std::fill(input_origin.begin(), input_origin.end(), val);
}
if ((flags & BuilderFlags::kSetUpper) == BuilderFlags::kDefault) {
interval_form = IntervalForm::half_open;
std::fill(input_shape.begin(), input_shape.end(), kInfIndex + 1);
}
if ((flags & BuilderFlags::kSetImplicitLower) == BuilderFlags::kDefault) {
implicit_lower_bounds =
((flags & BuilderFlags::kSetLower) == BuilderFlags::kDefault) &&
interval_form != IntervalForm::sized;
}
if ((flags & BuilderFlags::kSetImplicitUpper) == BuilderFlags::kDefault) {
implicit_upper_bounds =
(flags & BuilderFlags::kSetUpper) == BuilderFlags::kDefault;
}
implicit_lower_bounds &= implicit_mask;
implicit_upper_bounds &= implicit_mask;
TENSORSTORE_RETURN_IF_ERROR(internal::ValidateDimensionLabelsAreUnique(
data->input_labels().first(input_rank)));
span<OutputIndexMap> maps = data->output_index_maps().first(output_rank);
switch (interval_form) {
case IntervalForm::sized:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
Index& size = input_shape[input_dim];
if (size == kInfSize) {
size = kInfIndex + 1 - input_origin[input_dim];
}
TENSORSTORE_RETURN_IF_ERROR(
IndexInterval::Sized(input_origin[input_dim], size));
}
break;
case IntervalForm::closed:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::Closed(input_origin[input_dim],
input_shape[input_dim]));
input_shape[input_dim] = interval.size();
}
break;
case IntervalForm::half_open:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::HalfOpen(input_origin[input_dim],
input_shape[input_dim]));
input_shape[input_dim] = interval.size();
}
break;
default:
ABSL_UNREACHABLE();
}
const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(data);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& initializer = output_index_maps[output_dim];
auto& map = maps[output_dim];
if (initializer.index_array.valid()) {
TENSORSTORE_RETURN_IF_ERROR(initializer.index_array_bounds);
span<const Index> shape = initializer.index_array.shape();
const Index* byte_strides = initializer.index_array.byte_strides().data();
if (shape.size() != input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Index array for output dimension ", output_dim, " has rank ",
shape.size(), " but must have rank ", input_rank));
}
auto& index_array_data = map.SetArrayIndexing(shape.size());
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
const Index array_dim_size = shape[input_dim];
if (array_dim_size == 1) {
index_array_data.byte_strides[input_dim] = 0;
continue;
}
const Index input_size = input_shape[input_dim];
if (array_dim_size != input_size) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Index array for output dimension ", output_dim, " has shape ",
shape, " which does not match input_shape ", input_shape));
}
if (byte_strides[input_dim] == 0 && array_dim_size != 0) {
index_array_data.byte_strides[input_dim] = 0;
continue;
}
if (implicit_lower_bounds[input_dim] ||
implicit_upper_bounds[input_dim]) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Index array for output dimension ",
output_dim, " depends on input dimension ",
input_dim, " with implicit bounds"));
}
if (!IsFinite(IndexInterval::UncheckedSized(input_origin[input_dim],
input_size))) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Index array for output dimension ",
output_dim, " depends on input dimension ",
input_dim, " with infinite bounds"));
}
index_array_data.byte_strides[input_dim] = byte_strides[input_dim];
}
if (domain_is_explicitly_empty) {
map.SetConstant();
map.offset() = 0;
map.stride() = 0;
} else {
index_array_data.index_range = *initializer.index_array_bounds;
index_array_data.element_pointer = AddByteOffset(
initializer.index_array.element_pointer(),
internal::wrap_on_overflow::Subtract(
initializer.index_array.layout().origin_byte_offset(),
IndexInnerProduct(input_rank, input_origin.data(),
index_array_data.byte_strides)));
}
} else if (initializer.input_dimension) {
const DimensionIndex input_dim = *initializer.input_dimension;
if (input_dim < 0 || input_dim >= input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Input dimension ", input_dim, " specified for output dimension ",
output_dim, " is outside valid range [0, ", input_rank, ")"));
}
if (map.stride() == 0) {
map.SetConstant();
} else {
map.SetSingleInputDimension(input_dim);
}
} else {
map.SetConstant();
map.stride() = 0;
}
}
internal_index_space::DebugCheckInvariants(data);
return absl::OkStatus();
}
}
} | #include "tensorstore/index_space/index_transform_builder.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TransformAccess;
TEST(IndexTransformTest, BuilderValid) {
auto index_array = MakeArray<Index>({{{1, 0, 2, 2}}});
auto t =
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, index_array, IndexInterval::Closed(0, 3))
.Finalize()
.value();
static_assert(std::is_same_v<decltype(t), IndexTransform<3, 4>>);
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2, 3));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 2, 4));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("x", "y", "z"));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0, 0}));
EXPECT_EQ(IndexInterval::UncheckedSized(1, 2),
t.input_domain()[0].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
t.input_domain()[1].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(3, 4),
t.input_domain()[2].interval());
{
auto map = t.output_index_map(0);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(4, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(1);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(2, map.input_dimension());
EXPECT_EQ(5, map.offset());
EXPECT_EQ(7, map.stride());
}
{
auto map = t.output_index_map(2);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(6, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(3);
EXPECT_EQ(OutputIndexMethod::array, map.method());
EXPECT_EQ(7, map.offset());
EXPECT_EQ(9, map.stride());
auto index_array_ref = map.index_array();
EXPECT_EQ(&index_array(0, 0, 0), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_THAT(index_array_ref.layout().byte_strides(),
::testing::ElementsAre(0, 0, sizeof(Index)));
}
{
std::array<Index, 4> output_indices;
ASSERT_EQ(
absl::OkStatus(),
t.TransformIndices(span<const Index, 3>({1, 2, 3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(4, 26, 6, 16));
}
}
TEST(IndexTransformBuilderTest, Nullptr) {
IndexTransformBuilder<> builder(nullptr);
EXPECT_FALSE(builder.valid());
{
IndexTransformBuilder<> other_builder(builder);
EXPECT_FALSE(other_builder.valid());
}
{
IndexTransformBuilder<> other_builder(nullptr);
other_builder = builder;
EXPECT_FALSE(other_builder.valid());
}
}
TEST(IndexTransformBuilderTest, Move) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = std::move(builder);
EXPECT_TRUE(builder2.valid());
EXPECT_FALSE(builder.valid());
builder2.output_constant(0, 5);
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Copy) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = builder;
EXPECT_TRUE(builder.valid());
EXPECT_TRUE(builder2.valid());
builder.output_constant(0, 4);
builder2.output_constant(0, 5);
EXPECT_THAT(builder.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 4)
.Finalize()
.value());
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Default) {
auto t = IndexTransformBuilder<>(2, 1).Finalize().value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(-kInfIndex, -kInfIndex));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(kInfSize, kInfSize));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
auto map = t.output_index_map(0);
EXPECT_EQ(0, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InputOriginSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_origin({1, 2}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(1, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitLowerBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_lower_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputShapeSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_shape({5, 10}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(), IndexInterval::UncheckedSized(0, 5));
EXPECT_EQ(t.domain()[1].interval(), IndexInterval::UncheckedSized(0, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputInclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_inclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputExclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_exclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitUpperBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_upper_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, SingleInputDimensionDefaults) {
EXPECT_EQ(IndexTransformBuilder<>(3, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, InputOriginOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_origin({-kInfIndex - 1, -kInfIndex})
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid half-open index interval"));
}
TEST(IndexTransformBuilderTest, InputShapeOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1).input_shape({1, -1}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(0, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionNegative) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 0, 1, -1)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension -1 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionPositive) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 2)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 2 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayRank) {
EXPECT_THAT(IndexTransformBuilder<>(2, 1)
.output_index_array(0, 0, 1, MakeArray<Index>({1}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"has rank 1 but must have rank 2"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayShape) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 has shape \\{3, 2\\} "
"which does not match input_shape \\{2, 2\\}"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitLowerBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_lower_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitUpperBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayIndexRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}, {3, 4}}),
IndexInterval::Sized(3, -1))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(3, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayWithUnboundedDomain) {
EXPECT_THAT(
IndexTransformBuilder(1, 1)
.input_origin({tensorstore::kMaxFiniteIndex})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with infinite bounds"));
}
TEST(IndexTransformBuilderDeathTest, InvalidArguments) {
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_origin({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_shape({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_lower_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_upper_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_labels({"a"})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).output_constant(1, 0)),
"invalid output dimension");
}
TEST(IndexTransformBuilderTest, OutputStrideZero) {
auto t = IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(0, 1, 0, 0)
.Finalize()
.value();
auto map = t.output_index_map(0);
EXPECT_EQ(1, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexTransformBuilderTest, InputShapeInfSize) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, kInfSize})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, kInfIndex + 1 - 2));
}
TEST(IndexTransformBuilderTest, ExclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, ExclusiveMaxAfterShape) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({15, 16})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomainBox) {
auto t = IndexTransformBuilder<>(2, 2)
.input_bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomain) {
tensorstore::IndexDomain<2> domain(IndexTransformBuilder<2, 0>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.Finalize()
.value()
.domain());
auto t =
IndexTransformBuilder<>(2, 2).input_domain(domain).Finalize().value();
EXPECT_EQ(domain, t.domain());
}
TEST(IndexTransformBuilderTest, OutputIdentityTransform) {
EXPECT_THAT(
IndexTransformBuilder(2, 2).output_identity_transform().Finalize(),
::testing::Optional(tensorstore::IdentityTransform(2)));
EXPECT_EQ(IndexTransformBuilder(3, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder(3, 2)
.output_identity_transform()
.Finalize()
.value());
EXPECT_EQ(IndexTransformBuilder(2, 3)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_constant(2, 0)
.Finalize()
.value(),
IndexTransformBuilder(2, 3)
.output_identity_transform()
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, CopyOutputMap) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize());
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_maps(t.output_index_maps())
.Finalize(),
::testing::Optional(t));
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_constant(0, 4)
.output_map(1, t.output_index_maps()[1])
.output_map(2, t.output_index_maps()[2])
.output_map(3, t.output_index_maps()[3])
.Finalize(),
::testing::Optional(t));
}
TEST(InitializeTransformRepForBuilder, Basic) {
auto source = tensorstore::internal_index_space::TransformRep::Allocate(1, 2);
source->output_rank = 2;
tensorstore::internal_index_space::InitializeTransformRepForBuilder(
source.get());
EXPECT_EQ(0, source->output_index_maps()[0].offset());
EXPECT_EQ(0, source->output_index_maps()[0].stride());
EXPECT_EQ(0, source->output_index_maps()[1].offset());
EXPECT_EQ(0, source->output_index_maps()[1].stride());
}
TEST(IndexTransformBuilder, NonUniqueLabels) {
EXPECT_THAT(
IndexTransformBuilder<>(3, 0).input_labels({"a", "", "a"}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label\\(s\\) \"a\" not unique"));
}
TEST(IndexTransformBuilderTest, IndexArrayWithEmptyExplicitDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected,
IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_constant(0, 0)
.output_constant(1, 1)
.Finalize());
EXPECT_THAT(IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{2, 3}}))
.output_constant(1, 1)
.Finalize(),
::testing::Optional(expected));
}
TEST(IndexDomainBuilderTest, Null) {
IndexDomainBuilder builder(nullptr);
EXPECT_FALSE(builder.valid());
}
TEST(IndexDomainBuilderTest, Basic) {
IndexDomainBuilder builder(3);
EXPECT_EQ(3, builder.rank());
builder.origin(span<const Index, 3>({1, 2, 3}));
EXPECT_THAT(builder.origin(), ::testing::ElementsAre(1, 2, 3));
builder.shape(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.shape(), ::testing::ElementsAre(4, 5, 6));
builder.exclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.exclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.inclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.inclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.implicit_lower_bounds({0, 1, 1});
builder.implicit_upper_bounds({1, 0, 1});
EXPECT_THAT(builder.implicit_lower_bounds(),
DimensionSet::FromBools({0, 1, 1}));
EXPECT_THAT(builder.implicit_upper_bounds(),
DimensionSet::FromBools({1, 0, 1}));
builder.labels(std::vector<std::string>{"x", "y", "z"});
EXPECT_THAT(builder.labels(), ::testing::ElementsAre("x", "y", "z"));
}
TEST(IndexDomainBuilderTest, Labels) {
auto d = IndexDomainBuilder(2).labels({"x", "y"}).Finalize().value();
EXPECT_THAT(d.labels(), ::testing::ElementsAre("x", "y"));
}
TEST(IndexDomainBuilderTest, InclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexDomainBuilderTest, Shape) {
auto d =
IndexDomainBuilder(2).origin({1, 2}).shape({3, 5}).Finalize().value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 5));
}
TEST(IndexDomainBuilderTest, ExclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomainBox) {
auto d = IndexDomainBuilder(2)
.bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(tensorstore::IndexDomain<2> domain,
IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.labels({"x", "y"})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto d, IndexDomainBuilder<>(2).domain(domain).Finalize());
EXPECT_EQ(domain, d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_builder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_builder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7dc63e63-9db6-4acf-bf59-96ebbb710399 | cpp | google/tensorstore | transform_broadcastable_array | tensorstore/index_space/transform_broadcastable_array.cc | tensorstore/index_space/transform_broadcastable_array_test.cc | #include "tensorstore/index_space/transform_broadcastable_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
// Converts `output_array`, which must be broadcastable to the output space of
// `transform`, into an equivalent array indexed by the transform's *input*
// domain.  The result is the unbroadcast form of
// `output_array | transform | Materialize()`.
Result<SharedArray<const void>> TransformOutputBroadcastableArray(
    IndexTransformView<> transform, SharedArrayView<const void> output_array,
    IndexDomainView<> output_domain) {
  assert(transform.valid());
  Box<dynamic_rank(kMaxRank)> broadcast_domain(transform.output_rank());
  if (output_domain.valid()) {
    // The caller supplied explicit output-space bounds; broadcast directly to
    // them.
    broadcast_domain = output_domain.box();
  } else {
    // No explicit output domain: infer bounds from the range of `transform`,
    // then relax individual dimensions below where the inferred bounds must
    // not constrain broadcasting.
    TENSORSTORE_RETURN_IF_ERROR(
        tensorstore::GetOutputRange(transform, broadcast_domain));
    const DimensionIndex output_rank = transform.output_rank();
    for (DimensionIndex output_dim = 0; output_dim < output_rank;
         ++output_dim) {
      const auto map = transform.output_index_maps()[output_dim];
      switch (map.method()) {
        case OutputIndexMethod::constant:
          break;
        case OutputIndexMethod::array: {
          // Index-array maps give no usable contiguous interval; reset this
          // dimension to a default-constructed interval.
          broadcast_domain[output_dim] = IndexInterval();
          break;
        }
        case OutputIndexMethod::single_input_dimension: {
          const DimensionIndex input_dim = map.input_dimension();
          if (map.stride() != 1 && map.stride() != -1) {
            // Non-unit stride: the range is not contiguous, so use an
            // unbounded interval to avoid over-constraining the array.
            broadcast_domain[output_dim] = IndexInterval::Infinite();
          } else {
            // Unit-stride map: if the corresponding input dimension is fully
            // implicit and unbounded, adopt the matching dimension of
            // `output_array` (aligned to the trailing dimensions).
            const DimensionIndex output_array_dim =
                output_dim + output_array.rank() - output_rank;
            if (output_array_dim >= 0 &&
                transform.domain()[input_dim].optionally_implicit_interval() ==
                    OptionallyImplicitIndexInterval{IndexInterval::Infinite(),
                                                    true, true}) {
              broadcast_domain[output_dim] =
                  output_array.domain()[output_array_dim];
            }
          }
          break;
        }
      }
    }
  }
  // Broadcast to the computed output-space domain, map through `transform`
  // into the input space, materialize to a strided array, and finally strip
  // broadcast dimensions again.
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto broadcast_output_array,
      tensorstore::BroadcastArray(std::move(output_array), broadcast_domain));
  TENSORSTORE_ASSIGN_OR_RETURN(auto input_array,
                               std::move(broadcast_output_array) | transform |
                                   tensorstore::Materialize());
  return UnbroadcastArray(std::move(input_array));
}
// Maps `input_array` (broadcastable to `transform`'s input domain) to an
// equivalent array over the transform's *output* space.
//
// Only possible when every output dimension is produced by a unit-stride
// `single_input_dimension` map and no input dimension is used by more than
// one output dimension; otherwise returns `InvalidArgumentError`.
Result<SharedArray<const void>> TransformInputBroadcastableArray(
    IndexTransformView<> transform, SharedArrayView<const void> input_array) {
  assert(transform.valid());
  SharedArray<const void> output_array;
  output_array.layout().set_rank(transform.output_rank());
  DimensionSet seen_input_dims;
  ByteStridedPointer<const void> data_pointer =
      input_array.byte_strided_pointer();
  const DimensionIndex input_rank = transform.input_rank();
  // Build the output layout one output dimension at a time by inverting the
  // corresponding output index map.
  for (DimensionIndex output_dim = 0; output_dim < output_array.rank();
       ++output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Cannot transform input array through ",
                              map.method(), " output index map"));
    }
    const DimensionIndex input_dim = map.input_dimension();
    if (seen_input_dims[input_dim]) {
      // A diagonal mapping is not invertible.
      return absl::InvalidArgumentError(
          "Cannot transform input array with multiple "
          "output dimensions mapping to the same input dimension");
    }
    if (std::abs(map.stride()) != 1) {
      return absl::InvalidArgumentError(
          "Cannot transform input array through "
          "non-unit-stride output index map");
    }
    seen_input_dims[input_dim] = true;
    // Array dimensions align to the *trailing* input dimensions of the
    // transform (broadcasting convention).
    const DimensionIndex input_array_dim =
        input_array.rank() - input_rank + input_dim;
    if (input_array_dim < 0) {
      // Input dimension not present in the array: behave as broadcast size-1.
      output_array.shape()[output_dim] = 1;
      output_array.byte_strides()[output_dim] = 0;
    } else {
      const Index size = input_array.shape()[input_array_dim];
      output_array.shape()[output_dim] = size;
      const Index byte_stride = input_array.byte_strides()[input_array_dim];
      const Index stride = map.stride();
      output_array.byte_strides()[output_dim] =
          internal::wrap_on_overflow::Multiply(byte_stride, stride);
      if (stride == -1 && size != 0) {
        // Reversed dimension: shift the base pointer to the last element so
        // the negated byte stride walks the dimension backwards.
        data_pointer +=
            internal::wrap_on_overflow::Multiply(byte_stride, size - 1);
      }
    }
  }
  // Every input array dimension not consumed above must be a broadcast
  // dimension (extent 1 or zero byte stride); otherwise its data would be
  // silently dropped.
  for (DimensionIndex input_array_dim = 0; input_array_dim < input_array.rank();
       ++input_array_dim) {
    if (input_array.shape()[input_array_dim] == 1 ||
        input_array.byte_strides()[input_array_dim] == 0) {
      continue;
    }
    const DimensionIndex input_dim =
        input_rank - input_array.rank() + input_array_dim;
    if (input_dim < 0 || !seen_input_dims[input_dim]) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Cannot transform input array; "
                              "dimension ",
                              input_array_dim, " cannot be mapped"));
    }
  }
  // Share ownership with the original buffer while pointing at the possibly
  // adjusted base address.
  output_array.element_pointer() = SharedElementPointer<const void>(
      std::shared_ptr<const void>(std::move(input_array.pointer()),
                                  data_pointer.get()),
      input_array.dtype());
  return UnbroadcastArray(std::move(output_array));
}
} | #include "tensorstore/index_space/transform_broadcastable_array.h"
#include <stddef.h>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::SharedArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::TransformInputBroadcastableArray;
using ::tensorstore::TransformOutputBroadcastableArray;
// Checks both directions of the transformation for a known array pair:
// transforming `output_array` backward through `transform` must produce
// `input_array`, and transforming `input_array` forward must reproduce
// `output_array`.
void TestRoundTrip(IndexTransformView<> transform,
                   SharedArrayView<const void> input_array,
                   SharedArrayView<const void> output_array,
                   IndexDomainView<> output_domain) {
  SCOPED_TRACE(tensorstore::StrCat(
      "transform=", transform, ", output_domain=", output_domain,
      ", input_array.shape=", input_array.shape(),
      ", output_array.shape=", output_array.shape()));
  EXPECT_THAT(
      TransformOutputBroadcastableArray(transform, output_array, output_domain),
      ::testing::Optional(input_array));
  EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
              ::testing::Optional(output_array));
}
// Round-trip check where the expected input array is not known in advance:
// derives `input_array` from `output_array` via the backward transformation
// and verifies the forward transformation recovers `output_array`.  When
// `test_inverse` is true, additionally checks consistency against the
// inverse transform.
void TestRoundTrip(IndexTransformView<> transform,
                   SharedArrayView<const void> output_array,
                   IndexDomainView<> output_domain = IndexDomainView<>(),
                   bool test_inverse = false) {
  SCOPED_TRACE(tensorstore::StrCat(
      "transform=", transform, ", output_domain=", output_domain,
      ", output_array.shape=", output_array.shape()));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_array,
                                   TransformOutputBroadcastableArray(
                                       transform, output_array, output_domain));
  EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
              ::testing::Optional(output_array));
  if (test_inverse) {
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inverse_transform,
                                     tensorstore::InverseTransform(transform));
    EXPECT_THAT(
        TransformInputBroadcastableArray(inverse_transform, output_array),
        ::testing::Optional(input_array));
  }
}
// Allocates an `int` array of the given `shape` filled with sequential
// values 0, 1, 2, ... in storage order.
SharedArray<int> MakeTestArray(span<const Index> shape) {
  auto array = tensorstore::AllocateArray<int>(shape);
  int* const data = array.data();
  const Index total = array.num_elements();
  for (Index pos = 0; pos < total; ++pos) {
    data[pos] = static_cast<int>(pos);
  }
  return array;
}
TEST(RoundTripTest, IdentityTransform) {
for (DimensionIndex rank = 0; rank <= 3; ++rank) {
SCOPED_TRACE(tensorstore::StrCat("rank=", rank));
std::vector<Index> shape(rank);
for (DimensionIndex dim = 0; dim < rank; ++dim) {
shape[dim] = dim + 2;
}
auto array = MakeTestArray(shape);
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(rank), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IdentityTransform(shape).domain());
}
}
TEST(RoundTripTest, RandomInvertibleTransform) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
TestRoundTrip(transform, array);
TestRoundTrip(transform, array, domain);
}
}
TEST(RoundTripTest, RandomInvertibleTransformNoNewDims) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_new_dims = 0;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, p);
TestRoundTrip(transform, array, IndexDomain(), true);
TestRoundTrip(transform, array, domain, true);
}
}
TEST(TransformOutputBroadcastableArrayTest, ConstantMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_constant(1, 42)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformOutputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(2, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_single_input_dimension(1, 42, 2, 1)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({{3}, {2}, {1}})));
}
TEST(TransformOutputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 2)
.input_shape({3})
.output_single_input_dimension(0, 5, -1, 0)
.output_index_array(1, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformInputBroadcastableArrayTest, ConstantMap) {
auto array = MakeScalarArray<int>(42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(0, 1).output_constant(0, 42).Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through constant output index map"));
}
TEST(TransformInputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 1)
.output_single_input_dimension(0, 5, 2, 0)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array through "
"non-unit-stride output index map"));
}
TEST(TransformInputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through array output index map"));
}
TEST(TransformInputBroadcastableArrayTest, Diagonal) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array with multiple "
"output dimensions mapping to the same input dimension"));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedNoError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 1)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
::testing::Optional(array));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionError) {
auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
EXPECT_THAT(
TransformInputBroadcastableArray(tensorstore::IdentityTransform(1),
array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionNoError) {
auto array = MakeArray<int>({{1, 2, 3}});
EXPECT_THAT(TransformInputBroadcastableArray(
tensorstore::IdentityTransform(1), array),
::testing::Optional(MakeArray<int>({1, 2, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_broadcastable_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_broadcastable_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e27303bd-5458-4010-86a5-e3c07c076a7b | cpp | google/tensorstore | dimension_identifier | tensorstore/index_space/dimension_identifier.cc | tensorstore/index_space/dimension_identifier_test.cc | #include "tensorstore/index_space/dimension_identifier.h"
#include <cassert>
#include <ostream>
#include <string>
#include <system_error>
#include <variant>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
// Prints a `DimensionIdentifier` in human-readable form: the quoted label if
// the identifier holds a label, otherwise the numeric dimension index.
std::ostream& operator<<(std::ostream& os, const DimensionIdentifier& x) {
  return x.label().data() ? (os << QuoteString(x.label()))
                          : (os << x.index());
}
// Normalizes a possibly-negative dimension index against `rank`.
//
// Valid inputs lie in [-rank, rank); negative values count from the end.
// Returns the equivalent non-negative index, or `InvalidArgumentError` if
// `index` is out of range.
Result<DimensionIndex> NormalizeDimensionIndex(DimensionIndex index,
                                               DimensionIndex rank) {
  assert(rank >= 0);
  if (index >= 0 && index < rank) return index;
  if (index < 0 && index >= -rank) return index + rank;
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "Dimension index ", index, " is outside valid range [-", rank, ", ",
      rank, ")"));
}
// Normalizes a possibly-negative exclusive stop index against `rank`.
//
// Valid inputs lie in [-rank - 1, rank]; negative values count from the end
// (so -rank - 1 maps to -1, the position just before dimension 0).  Returns
// the normalized index, or `InvalidArgumentError` if out of range.
Result<DimensionIndex> NormalizeDimensionExclusiveStopIndex(
    DimensionIndex index, DimensionIndex rank) {
  assert(rank >= 0);
  const bool in_range = index >= -rank - 1 && index <= rank;
  if (!in_range) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Dimension exclusive stop index ", index, " is outside valid range [-",
        rank + 1, ", ", rank, "]"));
  }
  return index >= 0 ? index : index + rank;
}
namespace {
// Shared implementation of `NormalizeDimensionLabel` for both `std::string`
// and `std::string_view` label vectors: returns the index of `label` within
// `labels`, or `InvalidArgumentError` if `label` is empty or not present.
template <typename Label>
Result<DimensionIndex> NormalizeDimensionLabelImpl(std::string_view label,
                                                   span<const Label> labels) {
  if (label.empty()) {
    return absl::InvalidArgumentError(
        "Dimension cannot be specified by empty label");
  }
  // `std::find` yields `labels.end()` when absent, making `dim == size`.
  const DimensionIndex dim =
      std::find(labels.begin(), labels.end(), label) - labels.begin();
  if (dim == labels.size()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Label ", QuoteString(label), " does not match one of {",
        absl::StrJoin(labels, ", ",
                      [](std::string* out, std::string_view x) {
                        *out += QuoteString(x);
                      }),
        "}"));
  }
  return dim;
}
}
// Resolves `label` to a dimension index within `labels` (`std::string` form).
Result<DimensionIndex> NormalizeDimensionLabel(std::string_view label,
                                               span<const std::string> labels) {
  return NormalizeDimensionLabelImpl(label, labels);
}
// Resolves `label` to a dimension index within `labels` (`string_view` form).
Result<DimensionIndex> NormalizeDimensionLabel(
    std::string_view label, span<const std::string_view> labels) {
  return NormalizeDimensionLabelImpl(label, labels);
}
// Resolves a `DimensionIdentifier` -- either a label or a (possibly
// negative) index -- to a non-negative dimension index within `labels`.
Result<DimensionIndex> NormalizeDimensionIdentifier(
    DimensionIdentifier identifier, span<const std::string> labels) {
  if (!identifier.label().data()) {
    return NormalizeDimensionIndex(identifier.index(), labels.size());
  }
  return NormalizeDimensionLabel(identifier.label(), labels);
}
// Prints a `DimRangeSpec` in Python-slice notation (`start:stop[:step]`),
// omitting absent bounds and a step of 1.
std::ostream& operator<<(std::ostream& os, const DimRangeSpec& spec) {
  if (spec.inclusive_start.has_value()) {
    os << *spec.inclusive_start;
  }
  os << ':';
  if (spec.exclusive_stop.has_value()) {
    os << *spec.exclusive_stop;
  }
  if (spec.step != 1) {
    os << ':' << spec.step;
  }
  return os;
}
// Two specs are equal when start, stop, and step all match.
bool operator==(const DimRangeSpec& a, const DimRangeSpec& b) {
  if (a.step != b.step) return false;
  return a.inclusive_start == b.inclusive_start &&
         a.exclusive_stop == b.exclusive_stop;
}
// Expands `spec` (a Python-style `start:stop:step` slice over dimension
// indices) into explicit dimension indices appended to `*result`.
//
// Missing bounds default to the full range in the direction of `step`.
// Returns `InvalidArgumentError` if `step == 0`, if an explicit bound is out
// of range, or if the bounds are inverted relative to the sign of `step`.
absl::Status NormalizeDimRangeSpec(const DimRangeSpec& spec,
                                   DimensionIndex rank,
                                   DimensionIndexBuffer* result) {
  const DimensionIndex step = spec.step;
  if (step == 0) {
    return absl::InvalidArgumentError("step must not be 0");
  }
  DimensionIndex inclusive_start;
  if (spec.inclusive_start) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        inclusive_start, NormalizeDimensionIndex(*spec.inclusive_start, rank));
  } else if (step > 0) {
    inclusive_start = 0;
  } else {
    // Default start for a negative step is the last dimension.
    inclusive_start = rank - 1;
  }
  DimensionIndex exclusive_stop;
  if (spec.exclusive_stop) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        exclusive_stop,
        NormalizeDimensionExclusiveStopIndex(*spec.exclusive_stop, rank));
    if ((step > 0 && exclusive_stop < inclusive_start) ||
        (step < 0 && exclusive_stop > inclusive_start)) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat(spec, " is not a valid range"));
    }
  } else if (step > 0) {
    exclusive_stop = rank;
  } else {
    // Default stop for a negative step is one position before dimension 0.
    exclusive_stop = -1;
  }
  // Number of indices generated, rounding up so a partial final stride still
  // contributes one index.
  const DimensionIndex size =
      CeilOfRatio(exclusive_stop - inclusive_start, step);
  result->reserve(result->size() + size);
  for (DimensionIndex i = 0; i < size; ++i) {
    result->push_back(inclusive_start + step * i);
  }
  return absl::OkStatus();
}
// Resolves a single `DynamicDimSpec` -- a dimension index, a dimension
// label, or a `start:stop:step` range -- against `labels` and appends the
// resulting dimension indices to `*result`.
absl::Status NormalizeDynamicDimSpec(const DynamicDimSpec& spec,
                                     span<const std::string> labels,
                                     DimensionIndexBuffer* result) {
  // Visitor dispatching on the alternative held by the variant.
  struct Visitor {
    span<const std::string> labels;
    DimensionIndexBuffer* result;
    // Plain index: normalize (allowing negative indices) and append.
    absl::Status operator()(DimensionIndex i) const {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
                                   NormalizeDimensionIndex(i, labels.size()));
      result->push_back(index);
      return absl::OkStatus();
    }
    // Label: look up the matching dimension and append.
    absl::Status operator()(const std::string& label) const {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
                                   NormalizeDimensionLabel(label, labels));
      result->push_back(index);
      return absl::OkStatus();
    }
    // Range: expand into explicit indices.
    absl::Status operator()(const DimRangeSpec& s) const {
      return NormalizeDimRangeSpec(s, labels.size(), result);
    }
  };
  return std::visit(Visitor{labels, result}, spec);
}
// Expands each spec in `specs` in order, appending the resolved dimension
// indices to `*result`; stops at the first error.
absl::Status NormalizeDynamicDimSpecs(span<const DynamicDimSpec> specs,
                                      span<const std::string> labels,
                                      DimensionIndexBuffer* result) {
  for (const DynamicDimSpec& dim_spec : specs) {
    TENSORSTORE_RETURN_IF_ERROR(
        NormalizeDynamicDimSpec(dim_spec, labels, result));
  }
  return absl::OkStatus();
}
} | #include "tensorstore/index_space/dimension_identifier.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIdentifier;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::DynamicDimSpec;
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::NormalizeDimensionIdentifier;
using ::tensorstore::NormalizeDimensionIndex;
using ::tensorstore::span;
using ::tensorstore::StrCat;
TEST(DimensionIdentifierTest, ConstructDefault) {
DimensionIdentifier d;
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructDimensionIndex) {
DimensionIdentifier d(5);
EXPECT_EQ(5, d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructStringView) {
DimensionIdentifier d(std::string_view("hello"));
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructCString) {
DimensionIdentifier d("hello");
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructStdString) {
std::string s = "hello";
DimensionIdentifier d(s);
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, Compare) {
EXPECT_EQ(DimensionIdentifier(3), DimensionIdentifier(3));
EXPECT_EQ(DimensionIdentifier("a"), DimensionIdentifier("a"));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier(2));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier("b"));
EXPECT_NE(DimensionIdentifier(2), DimensionIdentifier(3));
}
TEST(DimensionIdentifierTest, PrintToOstream) {
EXPECT_EQ("3", StrCat(DimensionIdentifier(3)));
EXPECT_EQ("\"a\"", StrCat(DimensionIdentifier("a")));
}
TEST(NormalizeDimensionIndexTest, ValidNonNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(0, 5));
EXPECT_EQ(3, NormalizeDimensionIndex(3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(4, 5));
}
TEST(NormalizeDimensionIndexTest, ValidNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(-5, 5));
EXPECT_EQ(2, NormalizeDimensionIndex(-3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(-1, 5));
}
TEST(NormalizeDimensionIndexTest, InvalidNegative) {
EXPECT_THAT(NormalizeDimensionIndex(-6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(-7, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIndexTest, InvalidNonNegative) {
EXPECT_THAT(NormalizeDimensionIndex(5, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionLabel(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionLabelTest, MissingLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"w", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, EmptyLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIdentifierTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidPositiveIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
0, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
3, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidNegativeIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
-2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
-1, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
-4, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, InvalidIndex) {
EXPECT_THAT(NormalizeDimensionIdentifier(
4, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIdentifier(
-5, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(DimRangeSpecTest, Comparison) {
DimRangeSpec a{1, 5, 1};
DimRangeSpec b{0, 5, 1};
DimRangeSpec c{1, 6, 1};
DimRangeSpec d{1, 6, 2};
EXPECT_EQ(a, a);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
}
TEST(DimRangeSpecTest, PrintToOstream) {
EXPECT_EQ("1:5", StrCat(DimRangeSpec{1, 5, 1}));
EXPECT_EQ("1:5:2", StrCat(DimRangeSpec{1, 5, 2}));
EXPECT_EQ(":5", StrCat(DimRangeSpec{std::nullopt, 5, 1}));
EXPECT_EQ("1:", StrCat(DimRangeSpec{1, std::nullopt, 1}));
EXPECT_EQ(":", StrCat(DimRangeSpec{std::nullopt, std::nullopt, 1}));
EXPECT_EQ("::-1", StrCat(DimRangeSpec{std::nullopt, std::nullopt, -1}));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 3, 4, 5, 6, 7, 8, 9));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 4, 6, 8));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2Floor) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 7, 3}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 5));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 8, 7, 6, 5, 4, 3, 2));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 7, 5, 3));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{15, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStepNegative1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{5, std::nullopt, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(5, 4, 3, 2, 1, 0));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{-5, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 5, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, -15, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 15, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(19, 18, 17, 16));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 1},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 2},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidMaxStop) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{1, 5, 1}, 5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, InvalidStep0) {
DimensionIndexBuffer buffer;
EXPECT_THAT(
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 0}, 5,
&buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument, "step must not be 0"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStep1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{3, 1, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"3:1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 3, -1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1:3:-1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIndex) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 8, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension exclusive stop index 8 is outside valid "
"range \\[-6, 5\\]"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_identifier.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_identifier_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ac2af6eb-d6da-4cea-91cc-9358932ac908 | cpp | google/tensorstore | transformed_array | tensorstore/index_space/transformed_array.cc | tensorstore/index_space/transformed_array_test.cc | #include "tensorstore/index_space/transformed_array.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <cassert>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/internal/iterate_impl.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
// Builds the human-readable description used in cast error messages for a
// transformed array with the given data type and rank.
std::string DescribeTransformedArrayForCast(DataType dtype,
                                            DimensionIndex rank) {
  auto dtype_description = StaticCastTraits<DataType>::Describe(dtype);
  auto rank_description = StaticCastTraits<DimensionIndex>::Describe(rank);
  return tensorstore::StrCat("transformed array with ", dtype_description,
                             " and ", rank_description);
}
namespace {
// Folds an array's byte strides into `transform`'s output index maps so that
// evaluating the transform produces byte offsets instead of element indices.
//
// Each output map's stride and offset are multiplied by the corresponding
// byte stride using wrap-on-overflow arithmetic; a map whose resulting
// stride is zero is demoted to a constant map.
void MultiplyByteStridesIntoOutputIndexMaps(TransformRep* transform,
                                            span<const Index> byte_strides) {
  const span<OutputIndexMap> output_maps = transform->output_index_maps();
  assert(byte_strides.size() == output_maps.size());
  for (DimensionIndex i = 0; i < byte_strides.size(); ++i) {
    auto& map = output_maps[i];
    const Index byte_stride = byte_strides[i];
    const Index stride =
        internal::wrap_on_overflow::Multiply(map.stride(), byte_stride);
    if (stride == 0) {
      // A zero stride means the output no longer depends on any input index.
      map.SetConstant();
    }
    map.stride() = stride;
    map.offset() =
        internal::wrap_on_overflow::Multiply(map.offset(), byte_stride);
  }
}
}
// Copies elements of `source` into `dest`, converting between their data
// types via the registered converter; returns an error if no conversion
// exists or if an element copy fails.
absl::Status CopyTransformedArrayImpl(TransformedArrayView<const void> source,
                                      TransformedArrayView<void> dest) {
  TENSORSTORE_ASSIGN_OR_RETURN(auto r, internal::GetDataTypeConverterOrError(
                                           source.dtype(), dest.dtype()));
  absl::Status status;
  using TA = TransformedArrayView<const void>;
  // Iterate jointly over both arrays, applying the conversion closure;
  // `skip_repeated_elements` avoids redundant work on broadcast dimensions.
  TENSORSTORE_ASSIGN_OR_RETURN(auto success,
                               internal::IterateOverTransformedArrays<2>(
                                   r.closure, &status, skip_repeated_elements,
                                   span<const TA, 2>({source, TA(dest)})));
  if (!success) {
    // Iteration aborted early: surface the element-copy error.
    return internal::GetElementCopyErrorStatus(std::move(status));
  }
  return status;
}
// Builds an index transform equivalent to addressing `layout` directly: an
// identity transform over the layout's domain whose output index maps have
// the layout's byte strides folded in.
TransformRep::Ptr<> MakeTransformFromStridedLayout(
    StridedLayoutView<dynamic_rank, offset_origin> layout) {
  auto rep = MakeIdentityTransform(layout.domain());
  MultiplyByteStridesIntoOutputIndexMaps(rep.get(), layout.byte_strides());
  internal_index_space::DebugCheckInvariants(rep.get());
  return rep;
}
// Composes `layout` with an optional existing index transform.
//
// If `transform` is null this reduces to MakeTransformFromStridedLayout.
// Otherwise the transform's output rank must match the layout's rank; the
// layout's bounds are propagated back through the transform, and the
// layout's byte strides are folded into the output index maps so outputs
// become byte offsets.
Result<TransformRep::Ptr<>> MakeTransformFromStridedLayoutAndTransform(
    StridedLayoutView<dynamic_rank, offset_origin> layout,
    TransformRep::Ptr<> transform) {
  if (!transform) return MakeTransformFromStridedLayout(layout);
  if (transform->output_rank != layout.rank()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Transform output rank (", transform->output_rank,
        ") does not equal array rank (", layout.rank(), ")"));
  }
  TENSORSTORE_ASSIGN_OR_RETURN(
      transform, PropagateExplicitBoundsToTransform(layout.domain(),
                                                    std::move(transform)));
  MultiplyByteStridesIntoOutputIndexMaps(transform.get(),
                                         layout.byte_strides());
  internal_index_space::DebugCheckInvariants(transform.get());
  return transform;
}
// Returns a rank-`rank` layout spanning the full unbounded index space:
// origin -kInfIndex, shape kInfSize, and byte stride 1 in every dimension.
// NOTE(review): the view's lifetime relies on GetConstantVector returning
// storage that outlives the call — presumably static; confirm against that
// helper's contract.
StridedLayoutView<dynamic_rank, offset_origin> GetUnboundedLayout(
    DimensionIndex rank) {
  return StridedLayoutView<dynamic_rank, offset_origin>(
      rank, GetConstantVector<Index, -kInfIndex>(rank).data(),
      GetConstantVector<Index, kInfSize>(rank).data(),
      GetConstantVector<Index, 1>(rank).data());
}
}
namespace internal {
// Jointly iterates over `Arity` transformed arrays, invoking `closure` with
// one element pointer per array at each position of the common input domain.
//
// Returns an error if the arrays' input ranks differ, their domains are
// incompatible, or a transform's iteration state cannot be initialized;
// otherwise forwards the boolean result of the underlying iteration.
template <size_t Arity>
Result<bool> IterateOverTransformedArrays(
    ElementwiseClosure<Arity, void*> closure, void* arg,
    IterationConstraints constraints,
    span<const TransformedArrayView<const void>, Arity> transformed_arrays) {
  if (Arity == 0) return true;
  const DimensionIndex input_rank = transformed_arrays[0].rank();
  namespace flags = internal_index_space::input_dimension_iteration_flags;
  // Per-input-dimension iteration flags; only the first `input_rank`
  // entries are initialized and used.
  flags::Bitmask input_dimension_flags[kMaxRank];
  std::fill_n(
      &input_dimension_flags[0], input_rank,
      flags::GetDefaultBitmask(constraints.repeated_elements_constraint()));
  internal_index_space::SingleArrayIterationState single_array_states[Arity];
  Box<dynamic_rank(kNumInlinedDims)> input_bounds(input_rank);
  // All arrays must share the input rank of the first array.
  bool failed = false;
  for (size_t i = 0; i < Arity; ++i) {
    if (transformed_arrays[i].domain().rank() != input_rank) {
      failed = true;
    }
  }
  if (failed) {
    // Gather every rank so the error message lists all of them.
    DimensionIndex transformed_ranks[Arity];
    for (size_t i = 0; i < Arity; ++i) {
      transformed_ranks[i] = transformed_arrays[i].domain().rank();
    }
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Transformed array input ranks ",
                            span(transformed_ranks), " do not all match"));
  }
  // Intersect all domains into `input_bounds`; corresponding intervals must
  // be equal or unbounded.
  for (size_t i = 0; i < Arity; ++i) {
    const BoxView<> domain = transformed_arrays[i].domain().box();
    TENSORSTORE_RETURN_IF_ERROR(
        internal_index_space::ValidateAndIntersectBounds(
            domain, input_bounds, [](IndexInterval a, IndexInterval b) {
              return AreCompatibleOrUnbounded(a, b);
            }));
  }
  // Empty domain: nothing to iterate; trivially successful.
  for (DimensionIndex i = 0; i < input_rank; ++i) {
    if (input_bounds.shape()[i] == 0) {
      return true;
    }
  }
  // Compute per-array iteration state (base pointer, byte strides, index
  // array info) over the intersected bounds, noting whether any array uses
  // index-array output maps.
  bool has_array_indexed_output_dimensions = false;
  for (size_t i = 0; i < Arity; ++i) {
    const auto& ta = transformed_arrays[i];
    auto& single_array_state = single_array_states[i];
    TENSORSTORE_RETURN_IF_ERROR(
        internal_index_space::InitializeSingleArrayIterationState(
            ta.element_pointer(),
            internal_index_space::TransformAccess::rep(ta.transform()),
            input_bounds.origin().data(), input_bounds.shape().data(),
            &single_array_state, &input_dimension_flags[0]));
    if (single_array_state.num_array_indexed_output_dimensions) {
      has_array_indexed_output_dimensions = true;
    }
  }
  std::array<std::ptrdiff_t, Arity> element_sizes;
  for (size_t i = 0; i < Arity; ++i) {
    element_sizes[i] = transformed_arrays[i].dtype()->size;
  }
  if (!has_array_indexed_output_dimensions) {
    // Fast path: every array reduces to a plain strided layout.
    std::array<ByteStridedPointer<void>, Arity> pointers;
    std::array<const Index*, Arity> strides;
    for (size_t i = 0; i < Arity; ++i) {
      pointers[i] = single_array_states[i].base_pointer;
      strides[i] = &single_array_states[i].input_byte_strides[0];
    }
    return IterateOverStridedLayouts<Arity>(closure, arg, input_bounds.shape(),
                                            pointers, strides, constraints,
                                            element_sizes);
  }
  // General path: at least one index-array map; simplify the dimension
  // iteration order, then iterate using the simplified layout.
  internal_index_space::MarkSingletonDimsAsSkippable(input_bounds.shape(),
                                                     &input_dimension_flags[0]);
  internal_index_space::SimplifiedDimensionIterationOrder layout =
      internal_index_space::SimplifyDimensionIterationOrder<Arity>(
          internal_index_space::ComputeDimensionIterationOrder<Arity>(
              single_array_states, span(input_dimension_flags, input_rank),
              constraints.order_constraint()),
          input_bounds.shape(), single_array_states);
  return internal_index_space::IterateUsingSimplifiedLayout<Arity>(
      layout, input_bounds.shape(), closure, arg, single_array_states,
      element_sizes);
}
#define TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS(Arity) \
template Result<bool> IterateOverTransformedArrays<Arity>( \
ElementwiseClosure<Arity, void*> closure, void* arg, \
IterationConstraints constraints, \
span<const TransformedArrayView<const void>, Arity> transformed_arrays); \
TENSORSTORE_INTERNAL_FOR_EACH_ARITY(
TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS)
#undef TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS
// Attempts to express `transform` applied to `element_pointer` as a plain
// strided array, writing the origin (if `output_origin` is non-null), shape,
// and byte strides into the caller-provided buffers and returning the
// adjusted base pointer.
//
// Fails with InvalidArgument if any output dimension uses an index-array
// map, since such transforms cannot be represented by strides alone.  When
// `output_origin` is null, a zero-origin view is produced by folding the
// input origin into the returned pointer's byte offset.
Result<ElementPointer<Shared<const void>>> TryConvertToArrayImpl(
    ElementPointer<Shared<const void>> element_pointer,
    IndexTransformView<> transform, Index* output_origin, Index* output_shape,
    Index* output_byte_strides) {
  const DimensionIndex input_rank = transform.input_rank();
  const DimensionIndex output_rank = transform.output_rank();
  if (output_origin) {
    std::copy_n(transform.input_origin().begin(), input_rank, output_origin);
  }
  std::copy_n(transform.input_shape().begin(), input_rank, output_shape);
  Index offset = 0;
  std::fill_n(output_byte_strides, input_rank, Index(0));
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto map = transform.output_index_map(output_dim);
    // All output offsets accumulate into the single base-pointer offset.
    offset = internal::wrap_on_overflow::Add(offset, map.offset());
    switch (map.method()) {
      case OutputIndexMethod::constant:
        break;
      case OutputIndexMethod::single_input_dimension: {
        // Several output dimensions may reference the same input dimension;
        // their strides add together.
        const DimensionIndex input_dim = map.input_dimension();
        output_byte_strides[input_dim] = internal::wrap_on_overflow::Add(
            output_byte_strides[input_dim], map.stride());
        break;
      }
      case OutputIndexMethod::array:
        return absl::InvalidArgumentError(
            "Cannot view transformed array with index arrays as a strided "
            "array");
    }
  }
  if (!output_origin) {
    // Zero-origin view: shift the base pointer so index vector 0 addresses
    // the element at the transform's input origin.
    offset = internal::wrap_on_overflow::Add(
        offset, IndexInnerProduct(input_rank, transform.input_origin().data(),
                                  output_byte_strides));
  }
  return AddByteOffset(std::move(element_pointer), offset);
}
}
} | #include "tensorstore/index_space/transformed_array.h"
#include <stddef.h>
#include <stdint.h>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transform_array_constraints.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::StaticDataTypeCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::TransformedArray;
using ::tensorstore::dtypes::float32_t;
// Compile-time checks of the conversion relationships between the shared,
// view, and container TransformedArray variants.
static_assert(std::is_convertible_v<tensorstore::TransformedSharedArray<int, 1>,
                                    tensorstore::TransformedArrayView<int, 1>>);
static_assert(
    !std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
                           tensorstore::TransformedSharedArray<int, 1>>);
static_assert(std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
                                    tensorstore::TransformedArray<int, 1>>);
static_assert(
    std::is_same_v<typename tensorstore::TransformedArrayView<int, 1>::
                       template RebindContainerKind<tensorstore::container>,
                   tensorstore::TransformedArray<int, 1>>);
static_assert(tensorstore::HasBoxDomain<tensorstore::TransformedArray<int, 1>>);
// Collects the address of every element visited when iterating over `a`
// (skipping repeated elements), in iteration order.  Tests use this to check
// that two transformed arrays reference exactly the same underlying storage.
template <typename TA>
std::vector<const typename TA::Element*> GetPointers(const TA& a) {
  using Element = const typename TA::Element;
  std::vector<Element*> pointers;
  auto result = IterateOverTransformedArrays(
      [&](Element* x) { pointers.push_back(x); },
      tensorstore::skip_repeated_elements, a);
  EXPECT_TRUE(result);
  return pointers;
}
// Type list for the typed constructor tests below: both the dynamic-rank and
// rank-1 shared transformed array types.
using TransformedArrayTestTypes =
    ::testing::Types<tensorstore::TransformedSharedArray<int>,
                     tensorstore::TransformedSharedArray<int, 1>>;
template <typename T>
class TransformedArrayConstructorTest : public ::testing::Test {};
TYPED_TEST_SUITE(TransformedArrayConstructorTest, TransformedArrayTestTypes);
// Verifies that copy construction, move construction, copy assignment, and
// move assignment of TransformedArrayType from `source` all preserve the box
// domain and the set of referenced element pointers.
template <typename TransformedArrayType, typename SourceArray>
void TestCopyAndMove(SourceArray&& source,
                     std::vector<const int*> expected_pointers) {
  {
    // Copy construction.
    TransformedArrayType tb(source);
    EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tb));
    EXPECT_EQ(expected_pointers, GetPointers(tb));
  }
  {
    // Move construction (from a copy, so `source` stays usable).
    auto source_copy = source;
    TransformedArrayType tc(std::move(source_copy));
    EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tc));
    EXPECT_EQ(expected_pointers, GetPointers(tc));
  }
  {
    // Copy assignment.
    TransformedArrayType td;
    td = source;
    EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
    EXPECT_EQ(expected_pointers, GetPointers(td));
  }
  {
    // Move assignment.
    auto source_copy = source;
    TransformedArrayType td;
    td = std::move(source_copy);
    EXPECT_EQ(expected_pointers, GetPointers(td));
    EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
  }
}
// A default-constructed transformed array has no transform and a null
// element pointer.
TYPED_TEST(TransformedArrayConstructorTest, DefaultConstruct) {
  TypeParam ta;
  EXPECT_FALSE(ta.transform());
  EXPECT_EQ(nullptr, ta.element_pointer());
}
// Constructs TransformedArrayType from `array`, verifies the domain and
// element pointers, then re-checks copy/move semantics (for both the view
// and container kinds) via TestCopyAndMove.
template <typename TransformedArrayType, typename Array>
void TestConstructFromArray(Array&& array,
                            std::vector<const int*> expected_pointers) {
  auto array_copy = array;
  TransformedArrayType ta(std::forward<Array>(array));
  EXPECT_EQ(array_copy.domain(), ta.domain().box());
  EXPECT_EQ(array_copy.domain(), GetBoxDomainOf(ta));
  auto pointers = GetPointers(ta);
  EXPECT_EQ(expected_pointers, pointers);
  TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
  TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
      tensorstore::container>>(ta, expected_pointers);
}
// Construction from a zero-origin array (and its view type).
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromZeroOriginArray) {
  auto a = MakeArray<int>({1, 2, 3});
  const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
  TestConstructFromArray<TypeParam>(a, expected_pointers);
  TestConstructFromArray<TypeParam>(tensorstore::SharedArrayView<int, 1>(a),
                                    expected_pointers);
}
// Construction from an offset-origin array (and its view type).
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromOffsetOriginArray) {
  auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
  const std::vector<const int*> expected_pointers{&a(3), &a(4), &a(5)};
  TestConstructFromArray<TypeParam>(a, expected_pointers);
  TestConstructFromArray<TypeParam>(
      tensorstore::SharedOffsetArrayView<int, 1>(a), expected_pointers);
}
// Constructs TransformedArrayType from an (element pointer, transform) pair
// and verifies the stored transform/pointer and visited elements, then
// re-checks copy/move semantics via TestCopyAndMove.
template <typename TransformedArrayType, typename ElementPointer,
          typename Transform>
void TestConstructFromElementPointerAndTransform(
    ElementPointer&& element_pointer, Transform&& transform,
    std::vector<const int*> expected_pointers) {
  auto element_pointer_copy = element_pointer;
  auto transform_copy = transform;
  TransformedArrayType ta(std::forward<ElementPointer>(element_pointer),
                          std::forward<Transform>(transform));
  EXPECT_EQ(GetBoxDomainOf(transform_copy), GetBoxDomainOf(ta));
  EXPECT_EQ(transform_copy, ta.transform());
  EXPECT_EQ(element_pointer_copy, ta.element_pointer());
  auto pointers = GetPointers(ta);
  EXPECT_EQ(expected_pointers, pointers);
  TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
  TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
      tensorstore::container>>(ta, expected_pointers);
}
// Construction from an element pointer plus a transform, exercising copied,
// moved, and view-typed transform arguments.
TYPED_TEST(TransformedArrayConstructorTest,
           ConstructFromElementPointerAndTransform) {
  auto a = MakeArray<int>({1, 2, 3});
  const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
  // Transform whose output is already a byte offset (stride sizeof(int)).
  auto t = tensorstore::IndexTransformBuilder<1, 1>()
               .input_origin({0})
               .input_shape({3})
               .output_single_input_dimension(0, 0, sizeof(int), 0)
               .Finalize()
               .value();
  TestConstructFromElementPointerAndTransform<TypeParam>(a.element_pointer(), t,
                                                         expected_pointers);
  auto element_pointer = a.element_pointer();
  auto t_copy = t;
  TestConstructFromElementPointerAndTransform<TypeParam>(
      std::move(element_pointer), std::move(t_copy), expected_pointers);
  tensorstore::IndexTransformView<1, 1> t_view = t;
  TestConstructFromElementPointerAndTransform<TypeParam>(
      a.element_pointer(), t_view, expected_pointers);
}
// CTAD from a shared array deduces TransformedSharedArray and shares
// ownership; moving from the source transfers its reference count.
TEST(TransformedArrayTest, Array) {
  auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
  auto ta = tensorstore::TransformedArray(a);
  static_assert(std::is_same_v<decltype(ta),
                               tensorstore::TransformedSharedArray<int, 1>>);
  auto a_copy = a;
  EXPECT_EQ(3, a.pointer().use_count());
  auto tb = tensorstore::TransformedArray(std::move(a_copy));
  static_assert(std::is_same_v<decltype(tb),
                               tensorstore::TransformedSharedArray<int, 1>>);
  EXPECT_EQ(3, a.pointer().use_count());
  EXPECT_FALSE(a_copy.valid());
}
// CTAD from another transformed array copies/moves the element pointer with
// the expected reference-count changes.
TEST(TransformedArrayTest, TransformedArray) {
  auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
  auto ta = tensorstore::TransformedArray(a);
  auto tb = tensorstore::TransformedArray(ta);
  static_assert(std::is_same_v<decltype(tb),
                               tensorstore::TransformedSharedArray<int, 1>>);
  auto ta_copy = ta;
  EXPECT_EQ(4, a.pointer().use_count());
  auto tc = tensorstore::TransformedArray(std::move(ta_copy));
  static_assert(std::is_same_v<decltype(tc),
                               tensorstore::TransformedSharedArray<int, 1>>);
  EXPECT_EQ(a.element_pointer(), tc.element_pointer());
  EXPECT_EQ(4, a.pointer().use_count());
  EXPECT_FALSE(ta_copy.element_pointer());
}
// Applying a DimExpression to a transformed array yields the sliced view.
TEST(TransformedArrayTest, MapTransform) {
  auto array = MakeArray<int>({1, 2, 3});
  tensorstore::TransformedArray<int, 1> tarray(array);
  auto tarray2 =
      ChainResult(tarray, tensorstore::Dims(0).SizedInterval(1, 2)).value();
  EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), tarray2.Materialize().value());
}
// MakeTransformedArray composes the array's byte strides into the supplied
// transform (offset and stride are scaled by sizeof(int)).
TEST(TransformedArrayTest, ArrayAndTransform) {
  auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t, (tensorstore::IndexTransformBuilder<1, 1>()
                   .input_origin({0})
                   .input_shape({3})
                   .input_labels({"a"})
                   .output_single_input_dimension(0, 3, 1, 0)
                   .Finalize()));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
                                   tensorstore::MakeTransformedArray(a, t));
  static_assert(std::is_same_v<decltype(ta),
                               tensorstore::TransformedSharedArray<int, 1>>);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto expected_transform, (tensorstore::IndexTransformBuilder<1, 1>()
                                    .input_origin({0})
                                    .input_shape({3})
                                    .input_labels({"a"})
                                    .output_single_input_dimension(
                                        0, 3 * sizeof(int), 1 * sizeof(int), 0)
                                    .Finalize()));
  EXPECT_EQ(expected_transform, ta.transform());
}
// An index-vector-array slice produces index-array output maps while sharing
// the original element pointer.
TEST(TransformedArrayTest, DimExpression) {
  auto a = MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto ta, a |
                   tensorstore::Dims(0, 1).IndexVectorArraySlice(
                       MakeArray<Index>({{10, 22}, {11, 21}, {11, 22}})) |
                   tensorstore::Dims(0).Label("a"));
  EXPECT_EQ(ta.transform(),
            (tensorstore::IndexTransformBuilder<1, 2>()
                 .input_origin({0})
                 .input_shape({3})
                 .input_labels({"a"})
                 .output_index_array(0, 0, sizeof(int) * 3,
                                     MakeArray<Index>({10, 11, 11}),
                                     IndexInterval::Sized(10, 2))
                 .output_index_array(1, 0, sizeof(int),
                                     MakeArray<Index>({22, 21, 22}),
                                     IndexInterval::Sized(20, 3))
                 .Finalize()
                 .value()));
  EXPECT_EQ(a.element_pointer(), ta.element_pointer());
  EXPECT_EQ(ta.domain().box(), tensorstore::BoxView<1>({3}));
}
// Materialize preserves the (translated) offset origin by default.
TEST(TransformedArrayTest, MaterializeWithOffsetOrigin) {
  EXPECT_EQ(MakeOffsetArray<int>({2}, {3, 5, 6}),
            ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
                        tensorstore::Dims(0, 1)
                            .IndexVectorArraySlice(MakeArray<Index>(
                                {{10, 22}, {11, 21}, {11, 22}}))
                            .TranslateTo(2))
                .value()
                .Materialize());
}
// Materialize<zero_origin> shifts the result to a zero origin.
TEST(TransformedArrayTest, MaterializeWithZeroOrigin) {
  EXPECT_EQ(MakeArray<int>({3, 5, 6}),
            ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
                        tensorstore::Dims(0, 1)
                            .IndexVectorArraySlice(MakeArray<Index>(
                                {{10, 22}, {11, 21}, {11, 22}}))
                            .TranslateTo(2))
                .value()
                .template Materialize<tensorstore::zero_origin>()
                .value());
}
// Exercises Materialize/MakeCopy under different layout-order and
// repeated-element constraints, checking both element values and the byte
// strides of the allocated copies.
TEST(TransformedArrayTest, MaterializeConstraints) {
  auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
  auto transformed_array =
      ChainResult(array,
                  tensorstore::Dims(1)
                      .ClosedInterval(kImplicit, kImplicit, 2)
                      .MoveToFront(),
                  tensorstore::Dims(2).AddNew().SizedInterval(5, 3))
          .value();
  auto expected_array = MakeOffsetArray<int>(
      {1, 2, 5}, {{{3, 3, 3}, {6, 6, 6}}, {{5, 5, 5}, {8, 8, 8}}});
  {
    // Without must_allocate, Materialize can alias the original storage.
    auto new_array = transformed_array.Materialize().value();
    EXPECT_EQ(GetPointers(transformed_array), GetPointers(new_array));
  }
  // Checks that a forced copy has the expected contents and byte strides.
  const auto ValidateCopy =
      [&](const Result<tensorstore::SharedOffsetArray<const int, 3>>& new_array,
          const std::vector<Index>& expected_byte_strides) {
        TENSORSTORE_ASSERT_OK(new_array);
        EXPECT_NE(GetPointers(transformed_array), GetPointers(*new_array));
        EXPECT_EQ(expected_array, *new_array);
        EXPECT_THAT(new_array->byte_strides(),
                    ::testing::ElementsAreArray(expected_byte_strides));
      };
  // Runs both Materialize and MakeCopy with the given constraints.
  const auto TestCopyAndMaterialize =
      [&](tensorstore::TransformArrayConstraints constraints,
          std::vector<Index> expected_byte_strides) {
        SCOPED_TRACE(tensorstore::StrCat("TestCopyAndMaterialize: constraints=",
                                         constraints.value()));
        {
          SCOPED_TRACE("Materialize");
          auto new_array = transformed_array.Materialize(constraints);
          static_assert(std::is_same_v<
                        decltype(new_array),
                        Result<tensorstore::SharedOffsetArray<const int, 3>>>);
          ValidateCopy(new_array, expected_byte_strides);
        }
        {
          SCOPED_TRACE("MakeCopy");
          auto new_array =
              MakeCopy(transformed_array, constraints.iteration_constraints());
          static_assert(
              std::is_same_v<decltype(new_array),
                             Result<tensorstore::SharedOffsetArray<int, 3>>>);
          ValidateCopy(new_array, expected_byte_strides);
        }
      };
  TestCopyAndMaterialize(
      {tensorstore::skip_repeated_elements, tensorstore::must_allocate},
      {sizeof(int), sizeof(int) * 2, 0});
  TestCopyAndMaterialize(
      {tensorstore::c_order, tensorstore::skip_repeated_elements,
       tensorstore::must_allocate},
      {sizeof(int) * 2, sizeof(int), 0});
  TestCopyAndMaterialize(
      {tensorstore::fortran_order, tensorstore::skip_repeated_elements,
       tensorstore::must_allocate},
      {sizeof(int), sizeof(int) * 2, 0});
  TestCopyAndMaterialize(
      {tensorstore::fortran_order, tensorstore::include_repeated_elements,
       tensorstore::must_allocate},
      {sizeof(int), sizeof(int) * 2, sizeof(int) * 2 * 2});
  TestCopyAndMaterialize(
      {tensorstore::c_order, tensorstore::include_repeated_elements,
       tensorstore::must_allocate},
      {sizeof(int) * 2 * 3, sizeof(int) * 3, sizeof(int)});
}
// Out-of-range index arrays are reported when materializing.
TEST(TransformedArrayTest, MaterializeError) {
  EXPECT_THAT(
      ChainResult(MakeArray<int>({1, 2}), tensorstore::Dims(0).IndexArraySlice(
                                              MakeArray<Index>({3, 4})))
          .value()
          .Materialize(),
      MatchesStatus(absl::StatusCode::kOutOfRange));
}
// MakeCopy surfaces the same out-of-range error.
TEST(TransformedArrayTest, MakeCopy) {
  EXPECT_THAT(MakeCopy(ChainResult(MakeArray<int>({1, 2}),
                                   tensorstore::Dims(0).IndexArraySlice(
                                       MakeArray<Index>({3, 4})))
                           .value()),
              MatchesStatus(absl::StatusCode::kOutOfRange));
}
// A container transformed array can be moved into a view parameter.
TEST(TransformedArrayTest, MoveConstructViewFromContainer) {
  MapResult(
      [](tensorstore::TransformedSharedArrayView<const void> x) {
        EXPECT_EQ(tensorstore::BoxView({2, 3}, {2, 2}), GetBoxDomainOf(x));
        return absl::OkStatus();
      },
      tensorstore::MakeTransformedArray(
          tensorstore::MakeOffsetArray<int>({2, 3}, {{1, 2}, {3, 4}}),
          tensorstore::IdentityTransform(tensorstore::BoxView({2, 3}, {2, 2}))))
      .value();
}
// A null transform composes to an identity transform over the layout's
// domain with the byte strides as output strides.
TEST(ComposeLayoutAndTransformTest, NoTransform) {
  tensorstore::StridedLayout<tensorstore::dynamic_rank,
                             tensorstore::offset_origin>
      layout({1, 2}, {3, 4}, {5, 6});
  auto transform = tensorstore::ComposeLayoutAndTransform(
                       layout, tensorstore::IndexTransform<>())
                       .value();
  EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
                           .input_origin({1, 2})
                           .input_shape({3, 4})
                           .output_single_input_dimension(0, 0, 5, 0)
                           .output_single_input_dimension(1, 0, 6, 1)
                           .Finalize()
                           .value());
  }
// An existing transform has its offsets and strides scaled by the layout's
// byte strides.
TEST(ComposeLayoutAndTransformTest, ExistingTransform) {
  tensorstore::StridedLayout<tensorstore::dynamic_rank,
                             tensorstore::offset_origin>
      layout({1, 2}, {3, 4}, {5, 6});
  auto transform = tensorstore::ComposeLayoutAndTransform(
                       layout, tensorstore::IndexTransformBuilder<>(2, 2)
                                   .input_origin({11, 12})
                                   .input_shape({3, 2})
                                   .input_labels({"x", "y"})
                                   .output_single_input_dimension(0, -10, 1, 0)
                                   .output_single_input_dimension(1, -22, 2, 1)
                                   .Finalize()
                                   .value())
                       .value();
  EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
                           .input_origin({11, 12})
                           .input_shape({3, 2})
                           .input_labels({"x", "y"})
                           .output_single_input_dimension(0, -10 * 5, 1 * 5, 0)
                           .output_single_input_dimension(1, -22 * 6, 2 * 6, 1)
                           .Finalize()
                           .value());
}
// Rank mismatch between transform output and layout is an error.
TEST(ComposeLayoutAndTransformTest, RankMismatch) {
  tensorstore::StridedLayout<tensorstore::dynamic_rank,
                             tensorstore::offset_origin>
      layout({1, 2}, {3, 4}, {5, 6});
  EXPECT_THAT(tensorstore::ComposeLayoutAndTransform(
                  layout, tensorstore::IdentityTransform(3)),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Transform output rank \\(3\\) does not equal "
                            "array rank \\(2\\)"));
}
// Implicit bounds of the transform are resolved from the base array's
// domain, and byte strides are folded into the output maps.
TEST(MakeTransformedArrayTest, TwoArgumentBaseArrayAndTransform) {
  auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
  auto t = tensorstore::IndexTransformBuilder<1, 2>()
               .implicit_lower_bounds({1})
               .implicit_upper_bounds({1})
               .output_single_input_dimension(0, 1, 1, 0)
               .output_single_input_dimension(1, 2, 1, 0)
               .Finalize()
               .value();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
                                   tensorstore::MakeTransformedArray(array, t));
  EXPECT_EQ(array.element_pointer(), ta.element_pointer());
  EXPECT_EQ(
      tensorstore::IndexTransformBuilder<>(1, 2)
          .input_origin({1})
          .input_shape({2})
          .output_single_input_dimension(0, sizeof(int) * 3, sizeof(int) * 3, 0)
          .output_single_input_dimension(1, sizeof(int) * 2, sizeof(int), 0)
          .Finalize()
          .value(),
      ta.transform());
}
// GetUnboundedLayout covers the full index space with unit byte strides.
TEST(GetUnboundedLayoutTest, Basic) {
  EXPECT_EQ((tensorstore::StridedLayout<tensorstore::dynamic_rank,
                                        tensorstore::offset_origin>(
                {-kInfIndex, -kInfIndex}, {kInfSize, kInfSize}, {1, 1})),
            tensorstore::internal_index_space::GetUnboundedLayout(2));
}
// StaticDataTypeCast of a void transformed array back to int32 preserves the
// referenced elements.
TEST(TransformedArrayTest, StaticDataTypeCast) {
  TransformedArray<int32_t, 1> ta_orig = MakeArray<int32_t>({3, 4});
  TransformedArray<void, 1> ta = ta_orig;
  auto ta_int = StaticDataTypeCast<int32_t>(ta);
  static_assert(
      std::is_same_v<decltype(ta_int), Result<TransformedArray<int, 1>>>);
  ASSERT_TRUE(ta_int);
  EXPECT_THAT(GetPointers(*ta_int),
              ::testing::ElementsAreArray(GetPointers(ta_orig)));
}
// StaticCast from a plain array to a transformed-array view.
TEST(TransformedArrayTest, CastArrayToTransformedArray) {
  tensorstore::SharedArray<int32_t> a = MakeArray<int32_t>({1, 2});
  auto ta_result =
      tensorstore::StaticCast<tensorstore::TransformedArrayView<int32_t, 1>>(a);
  TENSORSTORE_ASSERT_OK(ta_result);
  EXPECT_THAT(GetPointers(*ta_result), ::testing::ElementsAre(&a(0), &a(1)));
}
// Same data-type cast, but with shared element pointers.
TEST(TransformedArrayTest, StaticDataTypeCastShared) {
  auto ta_orig = tensorstore::TransformedArray(MakeArray<int32_t>({3, 4}));
  TransformedArray<Shared<void>, 1> ta = ta_orig;
  auto ta_int = StaticDataTypeCast<int32_t>(ta);
  static_assert(std::is_same_v<decltype(ta_int),
                               Result<TransformedArray<Shared<int32_t>, 1>>>);
  ASSERT_TRUE(ta_int);
  EXPECT_THAT(GetPointers(*ta_int),
              ::testing::ElementsAreArray(GetPointers(ta_orig)));
}
// StaticRankCast succeeds for the actual rank and fails with a descriptive
// message for a mismatched rank.
TEST(TransformedArrayTest, StaticRankCast) {
  TransformedArray<Shared<int32_t>, dynamic_rank> ta =
      MakeArray<int32_t>({3, 4});
  auto ta1 = StaticRankCast<1>(ta);
  static_assert(std::is_same_v<decltype(ta1),
                               Result<TransformedArray<Shared<int32_t>, 1>>>);
  ASSERT_TRUE(ta1);
  EXPECT_THAT(GetPointers(*ta1), ::testing::ElementsAreArray(GetPointers(ta)));
  EXPECT_THAT(
      StaticRankCast<2>(ta),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          "Cannot cast transformed array with data type of int32 and rank of 1 "
          "to transformed array with data type of int32 and rank of 2"));
}
// Applying an identity transform leaves the contents unchanged.
TEST(TransformedArrayTest, ApplyIndexTransform) {
  auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto result = ChainResult(array, tensorstore::IdentityTransform<2>());
  TENSORSTORE_ASSERT_OK(result);
  EXPECT_EQ(array, MakeCopy(*result));
}
// Copy with int32 -> uint32 conversion into a transposed destination.
TEST(CopyTransformedArrayTest, Int32ToUint32) {
  auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
  auto b = tensorstore::AllocateArray<uint32_t>({3, 2});
  EXPECT_EQ(absl::OkStatus(),
            CopyTransformedArray(
                a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
  EXPECT_EQ(b, MakeArray<uint32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
// Same-type copy into a transposed destination.
TEST(CopyTransformedArrayTest, Int32ToInt32) {
  auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
  auto b = tensorstore::AllocateArray<int32_t>({3, 2});
  EXPECT_EQ(absl::OkStatus(),
            CopyTransformedArray(
                a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
  EXPECT_EQ(b, MakeArray<int32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
// Copy with int32 -> float32 conversion from a transposed source.
TEST(CopyTransformedArrayTest, Int32ToFloat32) {
  auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
  auto b = tensorstore::AllocateArray<float32_t>({3, 2});
  EXPECT_EQ(absl::OkStatus(),
            CopyTransformedArray(
                ChainResult(a, tensorstore::Dims(1, 0).Transpose()), b));
  EXPECT_EQ(b, MakeArray<float32_t>({{1.0, 4.0}, {2.0, 5.0}, {3.0, 6.0}}));
}
// An unsupported dtype conversion is rejected with InvalidArgument.
TEST(CopyTransformedArrayTest, InvalidDataType) {
  auto a = MakeArray<::tensorstore::dtypes::string_t>({"x", "y"});
  auto b = tensorstore::AllocateArray<float32_t>({2});
  EXPECT_THAT(CopyTransformedArray(a, b),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Cannot convert string -> float32"));
}
// UnownedToShared on an unowned transformed array yields the shared type
// (without taking real ownership).
TEST(TransformedArrayTest, UnownedToShared) {
  auto a = MakeArray<int>({1, 2, 3});
  TransformedArray<int> ta = a;
  auto shared_ta = UnownedToShared(ta);
  static_assert(
      std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
}
// UnownedToShared with an explicit owner aliases that owner's reference
// count; the count drops back once the shared views go out of scope.
TEST(TransformedArrayTest, UnownedToSharedAliasing) {
  auto a = MakeArray<int>({1, 2, 3});
  TransformedArray<int> ta = a;
  EXPECT_EQ(1, a.pointer().use_count());
  {
    auto shared_ta = UnownedToShared(a.pointer(), ta);
    EXPECT_EQ(2, a.pointer().use_count());
    static_assert(
        std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
    auto shared_ta_copy = UnownedToShared(shared_ta);
    static_assert(
        std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
    EXPECT_EQ(3, a.pointer().use_count());
  }
  EXPECT_EQ(1, a.pointer().use_count());
}
// Strided transforms convert to aliasing array views; index-array transforms
// are rejected.
TEST(TryConvertToArrayTest, Basic) {
  auto array = tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
                                                   tensorstore::value_init);
  EXPECT_THAT(array | tensorstore::IdentityTransform<2>() |
                  tensorstore::TryConvertToArray(),
              ::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
  EXPECT_THAT(array | tensorstore::Dims(0).IndexSlice(1) |
                  tensorstore::TryConvertToArray(),
              ::testing::Optional(tensorstore::ReferencesSameDataAs(array[1])));
  EXPECT_THAT(array | tensorstore::Dims(0).TranslateTo(1) |
                  tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
              ::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
  EXPECT_THAT(array |
                  tensorstore::Dims(0).OuterIndexArraySlice(
                      tensorstore::MakeArray<Index>({0, 1, 1})) |
                  tensorstore::TryConvertToArray(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Randomized check: TryConvertToArray agrees with Materialize for random
// strided transforms (both zero-origin and offset-origin variants).
TEST(TryConvertToArrayTest, Random) {
  tensorstore::SharedArray<const void> array =
      tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
                                          tensorstore::value_init);
  std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
      "TENSORSTORE_INTERNAL_VIEW_AS_ARRAY")};
  constexpr size_t kNumIterations = 10;
  for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
    tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
    p.max_stride = 2;
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, tensorstore::IndexDomain<>(array.domain()), p);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto materialized_zero_origin,
        array | transform |
            tensorstore::Materialize<tensorstore::zero_origin>());
    EXPECT_THAT(array | transform |
                    tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
                ::testing::Optional(materialized_zero_origin));
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto materialized_offset_origin,
        array | transform | tensorstore::Materialize());
    EXPECT_THAT(array | transform | tensorstore::TryConvertToArray(),
                ::testing::Optional(materialized_offset_origin));
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transformed_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transformed_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
efe8eb53-2350-4abc-aafc-adeb5a15e0e5 | cpp | google/tensorstore | dimension_units | tensorstore/index_space/dimension_units.cc | tensorstore/index_space/dimension_units_test.cc | #include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
// Converts per-input-dimension units to per-output-dimension units of
// `transform`.
//
// Each output dimension mapped by a single_input_dimension map with nonzero
// stride inherits its input dimension's unit divided by |stride|.  It is an
// error for an input dimension with a unit to have no such corresponding
// output dimension (its unit would silently be lost).  If `transform` is
// invalid (null), `input_units` is returned unchanged.
Result<DimensionUnitsVector> TransformInputDimensionUnits(
    IndexTransformView<> transform, DimensionUnitsVector input_units) {
  if (!transform.valid()) return input_units;
  const DimensionIndex input_rank = transform.input_rank(),
                       output_rank = transform.output_rank();
  assert(input_units.size() == input_rank);
  std::optional<Unit> output_units[kMaxRank];
  // Input dimensions whose unit has been propagated to some output dim.
  DimensionSet seen_input_dims;
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    const Index stride = map.stride();
    if (stride == 0) continue;
    const DimensionIndex input_dim = map.input_dimension();
    const auto& input_unit = input_units[input_dim];
    if (!input_unit) continue;
    seen_input_dims[input_dim] = true;
    auto& output_unit = output_units[output_dim];
    output_unit = input_unit;
    // A stride of s means one output step spans 1/|s| of the input unit.
    *output_unit /= std::abs(static_cast<double>(stride));
  }
  // Any input unit that could not be propagated is an error.
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    if (!input_units[input_dim] || seen_input_dims[input_dim]) continue;
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "No output dimension corresponds to input dimension ", input_dim,
        " with unit ", *input_units[input_dim]));
  }
  // Reuse `input_units`'s storage for the result.
  input_units.resize(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    input_units[output_dim] = std::move(output_units[output_dim]);
  }
  return input_units;
}
// Converts per-output-dimension units to per-input-dimension units of
// `transform` (the reverse of TransformInputDimensionUnits).
//
// Only input dimensions that map one-to-one to an output dimension (via a
// single_input_dimension map with nonzero stride) receive a unit, equal to
// the output unit multiplied by |stride|; other output units are dropped.
// This direction cannot fail, so it returns the vector directly.  If
// `transform` is invalid (null), `output_units` is returned unchanged.
DimensionUnitsVector TransformOutputDimensionUnits(
    IndexTransformView<> transform, DimensionUnitsVector output_units) {
  if (!transform.valid()) return output_units;
  const DimensionIndex input_rank = transform.input_rank(),
                       output_rank = transform.output_rank();
  assert(output_units.size() == output_rank);
  DimensionSet one_to_one_input_dims =
      internal::GetOneToOneInputDimensions(transform).one_to_one;
  std::optional<Unit> input_units[kMaxRank];
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& output_unit = output_units[output_dim];
    if (!output_unit) continue;
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    const Index stride = map.stride();
    if (stride == 0) continue;
    const DimensionIndex input_dim = map.input_dimension();
    // Skip input dims referenced by multiple outputs: the unit assignment
    // would be ambiguous.
    if (!one_to_one_input_dims[input_dim]) continue;
    auto& input_unit = input_units[input_dim];
    input_unit = output_unit;
    // One input step spans |stride| output steps' worth of the unit.
    *input_unit *= std::abs(static_cast<double>(stride));
  }
  // Reuse `output_units`'s storage for the result.
  output_units.resize(input_rank);
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    output_units[input_dim] = std::move(input_units[input_dim]);
  }
  return output_units;
}
/// Merges `new_units` into `existing_units` element-wise.
///
/// A unit specified in `new_units` fills in an unspecified (`std::nullopt`)
/// entry of `existing_units`.  If both vectors specify a unit for the same
/// dimension, the two units must be equal; otherwise an
/// `InvalidArgumentError` is returned.  An empty `existing_units` is treated
/// as all-unspecified and is resized to match `new_units`.
///
/// The merge uses two passes: the first pass only validates, so that when an
/// error is returned no unit values have been copied into `existing_units`
/// and the error message reflects its original contents.  (The `resize`
/// itself still happens before validation.)
absl::Status MergeDimensionUnits(DimensionUnitsVector& existing_units,
                                 span<const std::optional<Unit>> new_units) {
  assert(existing_units.empty() || existing_units.size() == new_units.size());
  existing_units.resize(new_units.size());
  // Pass 1: detect conflicts without modifying any unit values.
  for (size_t i = 0; i < new_units.size(); ++i) {
    auto& existing_unit = existing_units[i];
    auto& new_unit = new_units[i];
    if (!new_unit) continue;
    if (existing_unit && existing_unit != new_unit) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Cannot merge dimension units ", DimensionUnitsToString(new_units),
          " and ", DimensionUnitsToString(existing_units)));
    }
  }
  // Pass 2: no conflicts exist; fill in the unspecified entries.
  for (size_t i = 0; i < new_units.size(); ++i) {
    auto& existing_unit = existing_units[i];
    auto& new_unit = new_units[i];
    if (!new_unit || existing_unit) continue;
    existing_unit = new_unit;
  }
  return absl::OkStatus();
}
/// Formats a list of optional dimension units as a JSON-like array string.
///
/// Present units are rendered via `StrCat` and quoted; absent units are
/// rendered as the literal `null`, e.g. `[null, "4 nm"]`.
std::string DimensionUnitsToString(span<const std::optional<Unit>> u) {
  std::string out = "[";
  for (DimensionIndex i = 0; i < u.size(); ++i) {
    if (i != 0) out += ", ";
    const auto& unit = u[i];
    if (unit) {
      out += tensorstore::QuoteString(tensorstore::StrCat(*unit));
    } else {
      out += "null";
    }
  }
  out += "]";
  return out;
}
} | #include "tensorstore/index_space/dimension_units.h"
#include <stddef.h>
#include <iterator>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/unit.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionUnitsToString;
using ::tensorstore::DimensionUnitsVector;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionUnits;
using ::tensorstore::TransformInputDimensionUnits;
using ::tensorstore::TransformOutputDimensionUnits;
using ::tensorstore::Unit;
TEST(DimensionUnitsToStringTest, Basic) {
EXPECT_EQ("[null, \"4 nm\"]", DimensionUnitsToString(DimensionUnitsVector{
std::nullopt, Unit("4nm")}));
}
TEST(MergeDimensionUnitsTest, BothUnspecified) {
DimensionUnitsVector existing_units{std::nullopt, std::nullopt};
DimensionUnitsVector new_units{std::nullopt, std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units,
::testing::ElementsAre(std::nullopt, std::nullopt));
}
TEST(MergeDimensionUnitsTest, OneSpecifiedOneUnspecified) {
DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}
TEST(MergeDimensionUnitsTest, BothSpecifiedSame) {
DimensionUnitsVector existing_units{Unit("8nm"), Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}
TEST(MergeDimensionUnitsTest, BothSpecifiedDistinct) {
DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), Unit("5nm")};
EXPECT_THAT(
MergeDimensionUnits(existing_units, new_units),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge dimension units \\[\"8 nm\", \"5 nm\"\\] "
"and \\[null, \"4 nm\"\\]"));
EXPECT_THAT(existing_units,
::testing::ElementsAre(std::nullopt, Unit("4nm")));
}
std::optional<Unit> MakeRandomUnit(absl::BitGenRef gen) {
constexpr std::string_view kBaseUnits[] = {
"",
"nm",
"um",
};
if (absl::Bernoulli(gen, 0.2)) return std::nullopt;
const double multiplier = absl::Uniform<int>(gen, 5, 20);
const auto base_unit =
kBaseUnits[absl::Uniform<size_t>(gen, 0, std::size(kBaseUnits))];
return Unit(multiplier, std::string(base_unit));
}
DimensionUnitsVector MakeRandomDimensionUnits(DimensionIndex rank,
absl::BitGenRef gen) {
DimensionUnitsVector units(rank);
for (auto& unit : units) {
unit = MakeRandomUnit(gen);
}
return units;
}
TEST(TransformOutputDimensionUnitsTest, InvertibleRoundTrip) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto domain = tensorstore::IndexDomain(box);
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
auto input_units = TransformOutputDimensionUnits(transform, output_units);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
::testing::Optional(::testing::ElementsAreArray(output_units)));
EXPECT_THAT(TransformOutputDimensionUnits(inv_transform, input_units),
::testing::ElementsAreArray(output_units));
}
}
TEST(TransformOutputDimensionUnitsTest, StridedNonInvertibleRoundTrip) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto domain = tensorstore::IndexDomain(box);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_stride = 4;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, p);
auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
auto input_units = TransformOutputDimensionUnits(transform, output_units);
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
::testing::Optional(::testing::ElementsAreArray(output_units)));
}
}
TEST(TransformInputDimensionUnitsTest, NoCorrespondingOutputDimension) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IndexTransformBuilder(1, 0).Finalize());
DimensionUnitsVector input_units{"4nm"};
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"No output dimension corresponds to "
"input dimension 0 with unit 4 nm"));
}
TEST(TransformOutputDimensionUnitsTest, NonUnique) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IndexTransformBuilder(2, 3)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 1)
.Finalize());
DimensionUnitsVector output_units{"4nm", "5nm", "6nm"};
EXPECT_THAT(TransformOutputDimensionUnits(transform, output_units),
::testing::ElementsAre(std::nullopt, Unit("6nm")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_units.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_units_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
03256495-5ec0-4ac6-9ce5-6a277dd98fb7 | cpp | google/tensorstore | dimension_permutation | tensorstore/index_space/dimension_permutation.cc | tensorstore/index_space/dimension_permutation_test.cc | #include "tensorstore/index_space/dimension_permutation.h"
#include <algorithm>
#include <numeric>
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
/// Fills `permutation` with the identity permutation for `c_order`, or the
/// reversed identity permutation (`rank-1, ..., 1, 0`) for `fortran_order`.
void SetPermutation(ContiguousLayoutOrder order,
                    span<DimensionIndex> permutation) {
  const DimensionIndex rank = permutation.size();
  if (order == c_order) {
    std::iota(permutation.begin(), permutation.end(), DimensionIndex(0));
  } else {
    for (DimensionIndex i = 0; i < rank; ++i) {
      permutation[i] = rank - 1 - i;
    }
  }
}
/// Returns `true` if `permutation` is a valid permutation of
/// `{0, ..., permutation.size()-1}`: rank within `kMaxRank`, every entry in
/// range, and no entry repeated.
bool IsValidPermutation(span<const DimensionIndex> permutation) {
  const DimensionIndex rank = permutation.size();
  if (rank > kMaxRank) return false;
  DimensionSet seen;
  for (const DimensionIndex dim : permutation) {
    // Out-of-range or duplicate entries invalidate the permutation.
    if (dim < 0 || dim >= rank) return false;
    if (seen[dim]) return false;
    seen[dim] = true;
  }
  return true;
}
/// Returns `true` if `permutation` equals the identity permutation
/// (`c_order`) or the reversed identity permutation (`fortran_order`).
bool PermutationMatchesOrder(span<const DimensionIndex> permutation,
                             ContiguousLayoutOrder order) {
  const DimensionIndex rank = permutation.size();
  for (DimensionIndex i = 0; i < rank; ++i) {
    const DimensionIndex expected =
        (order == c_order) ? i : rank - i - 1;
    if (permutation[i] != expected) return false;
  }
  return true;
}
/// Computes the inverse of `perm` (a valid permutation of length `rank`)
/// into `inverse_perm`, i.e. `inverse_perm[perm[i]] == i` for all `i`.
void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm,
                       DimensionIndex* inverse_perm) {
  assert(IsValidPermutation(span(perm, rank)));
  for (DimensionIndex source_dim = 0; source_dim < rank; ++source_dim) {
    // The position `perm[source_dim]` maps back to `source_dim`.
    inverse_perm[perm[source_dim]] = source_dim;
  }
}
/// Sets `permutation` to the dimension order of `layout`, from outermost
/// (largest absolute byte stride) to innermost (smallest).
///
/// The sort key is the *negated magnitude* of each byte stride: positive
/// strides are negated, negative strides are used as-is.  Negating only
/// positive values cannot overflow, unlike taking `abs` of the most negative
/// `Index` value.  `std::stable_sort` preserves the original relative order
/// of dimensions with equal stride magnitude (e.g. all-zero strides).
void SetPermutationFromStridedLayout(StridedLayoutView<> layout,
                                     span<DimensionIndex> permutation) {
  assert(layout.rank() == permutation.size());
  // Start from the identity permutation and reorder it below.
  std::iota(permutation.begin(), permutation.end(), DimensionIndex(0));
  // Returns -abs(byte_stride) without risking signed overflow.
  const auto get_effective_byte_stride_nabs = [&](DimensionIndex i) -> Index {
    const Index byte_stride = layout.byte_strides()[i];
    if (byte_stride > 0) return -byte_stride;
    return byte_stride;
  };
  // Ascending order of -abs(stride) is descending order of abs(stride).
  std::stable_sort(permutation.begin(), permutation.end(),
                   [&](DimensionIndex a, DimensionIndex b) {
                     return get_effective_byte_stride_nabs(a) <
                            get_effective_byte_stride_nabs(b);
                   });
}
/// Derives an input-dimension permutation `input_perm` from an
/// output-dimension permutation `output_perm`, relative to `transform`.
///
/// Each input dimension is keyed by the earliest position within
/// `output_perm` of an output dimension that maps to it via a
/// `single_input_dimension` map, and input dimensions are sorted by that
/// key.  Input dimensions referenced by no such output dimension receive the
/// sentinel key `kMaxRank` and therefore sort last, ordered among themselves
/// by their own index (the tie-break).
void TransformOutputDimensionOrder(IndexTransformView<> transform,
                                   span<const DimensionIndex> output_perm,
                                   span<DimensionIndex> input_perm) {
  assert(transform.valid());
  assert(IsValidPermutation(output_perm));
  const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  assert(input_rank == input_perm.size());
  assert(output_rank == output_perm.size());
  // min_output_dim[i]: smallest position in `output_perm` of an output
  // dimension that maps to input dimension `i`; `kMaxRank` if none does.
  DimensionIndex min_output_dim[kMaxRank];
  std::fill_n(min_output_dim, input_rank, kMaxRank);
  for (DimensionIndex orig_perm_i = 0; orig_perm_i < output_rank;
       ++orig_perm_i) {
    const DimensionIndex output_dim = output_perm[orig_perm_i];
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    const DimensionIndex input_dim = map.input_dimension();
    min_output_dim[input_dim] =
        std::min(min_output_dim[input_dim], orig_perm_i);
  }
  // Sort the identity permutation by (key, dimension index).
  std::iota(input_perm.begin(), input_perm.end(), DimensionIndex(0));
  std::sort(input_perm.begin(), input_perm.end(),
            [&](DimensionIndex a, DimensionIndex b) {
              DimensionIndex a_ordinal = min_output_dim[a];
              DimensionIndex b_ordinal = min_output_dim[b];
              if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
              return a < b;
            });
  assert(IsValidPermutation(input_perm));
}
/// Derives an output-dimension permutation `output_perm` from an
/// input-dimension permutation `input_perm`, relative to `transform`.
/// Inverse companion of `TransformOutputDimensionOrder`.
///
/// Each output dimension with a `single_input_dimension` map is keyed by the
/// position of its input dimension within `input_perm`; all other output
/// dimensions receive the sentinel key `kMaxRank` and sort last.  Ties are
/// broken by the output dimension index itself.
void TransformInputDimensionOrder(IndexTransformView<> transform,
                                  span<const DimensionIndex> input_perm,
                                  span<DimensionIndex> output_perm) {
  assert(transform.valid());
  assert(IsValidPermutation(input_perm));
  // `output_rank` is only read inside asserts; silence NDEBUG warnings.
  [[maybe_unused]] const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  assert(input_rank == input_perm.size());
  assert(output_rank == output_perm.size());
  // inverse_input_perm[d]: position of input dimension `d` in `input_perm`.
  DimensionIndex inverse_input_perm[kMaxRank];
  InvertPermutation(input_rank, input_perm.data(), inverse_input_perm);
  std::iota(output_perm.begin(), output_perm.end(), DimensionIndex(0));
  // Sort key for an output dimension: its input dimension's ordinal in
  // `input_perm`, or `kMaxRank` for non-single_input_dimension maps.
  const auto get_output_dim_ordinal = [&](DimensionIndex output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) {
      return kMaxRank;
    }
    return inverse_input_perm[map.input_dimension()];
  };
  std::sort(output_perm.begin(), output_perm.end(),
            [&](DimensionIndex a, DimensionIndex b) {
              DimensionIndex a_ordinal = get_output_dim_ordinal(a);
              DimensionIndex b_ordinal = get_output_dim_ordinal(b);
              if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
              return a < b;
            });
  assert(IsValidPermutation(output_perm));
}
} | #include "tensorstore/index_space/dimension_permutation.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IsValidPermutation;
using ::tensorstore::PermutationMatchesOrder;
using ::tensorstore::span;
TEST(SetPermutationTest, Rank0) {
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
}
TEST(SetPermutationTest, Rank1COrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank1FortranOrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank2COrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationTest, Rank2FortranOrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationTest, Rank3COrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1, 2));
}
TEST(SetPermutationTest, Rank3FortranOrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(2, 1, 0));
}
TEST(IsValidPermutationTest, Basic) {
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>()));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({-1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0, 1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({0, 0})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 2, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 2, 1})));
}
TEST(PermutationMatchesOrderTest, Basic) {
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{0, 1}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1, 2}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::fortran_order));
EXPECT_FALSE(PermutationMatchesOrder({{1, 0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{1, 0}}, tensorstore::fortran_order));
}
TEST(InvertPermutationTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::InvertPermutation(0, source.data(), dest.data());
}
TEST(InvertPermutationTest, Rank1) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::InvertPermutation(1, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(InvertPermutationTest, Rank2Identity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
}
TEST(InvertPermutationTest, Rank2Transpose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
}
TEST(InvertPermutationTest, Rank3) {
std::vector<DimensionIndex> source{1, 2, 0};
std::vector<DimensionIndex> dest(3, 42);
tensorstore::InvertPermutation(3, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(2, 0, 1));
std::vector<DimensionIndex> source2(3, 42);
tensorstore::InvertPermutation(3, dest.data(), source2.data());
EXPECT_EQ(source, source2);
}
TEST(SetPermutationFromStridedLayoutTest, Rank0) {
tensorstore::StridedLayout<> layout(0);
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
}
TEST(SetPermutationFromStridedLayoutTest, Rank1) {
tensorstore::StridedLayout<> layout({5}, {10});
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2COrder) {
tensorstore::StridedLayout<> layout({5, 6}, {10, 5});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2FortranOrder) {
tensorstore::StridedLayout<> layout({5, 6}, {5, 10});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2ZeroStride) {
tensorstore::StridedLayout<> layout({5, 6}, {0, 0});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank4) {
tensorstore::StridedLayout<> layout({5, 6, 7, 8}, {10, 5, 6, 6});
std::vector<DimensionIndex> permutation(4, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 2, 3, 1));
}
TEST(TransformOutputDimensionOrderTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(0),
source, dest);
}
TEST(TransformOutputDimensionOrderTest, Rank1Identity) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(1),
source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderIdentity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderIdentity) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderTranspose) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderTranspose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_permutation.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_permutation_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
10851145-94d4-4428-b846-ace1c1731762 | cpp | google/tensorstore | alignment | tensorstore/index_space/alignment.cc | tensorstore/index_space/alignment_test.cc | #include "tensorstore/index_space/alignment.h"
#include <algorithm>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
/// Computes, for each `source` dimension, the matching `target` dimension
/// (or -1 if unmatched) into `source_matches`, subject to `options`.
///
/// Matching proceeds in two phases:
///  1. Candidate assignment — either positional (align the trailing
///     dimensions, when permutation is disabled or either domain is fully
///     unlabeled) or label-based (match equal labels; unlabeled source
///     dimensions are paired right-to-left with unlabeled target
///     dimensions).
///  2. Validation — candidates whose intervals are incompatible are cleared
///     to -1; incompatibilities that `options` cannot excuse are collected
///     into a combined error message.
///
/// Compatibility of a matched pair depends on `options`: without
/// `translate`, the full intervals (origin and size) must be equal; with
/// `translate`, only the sizes must match.  With `broadcast`, a
/// size-1 source dimension may remain unmatched (or mismatch silently);
/// without it, every source dimension must match.
absl::Status AlignDimensionsTo(IndexDomainView<> source,
                               IndexDomainView<> target,
                               span<DimensionIndex> source_matches,
                               DomainAlignmentOptions options) {
  assert(source.valid());
  assert(target.valid());
  const DimensionIndex source_rank = source.rank();
  const DimensionIndex target_rank = target.rank();
  // Rank mismatch can only be reconciled by broadcasting.
  if (!(options & DomainAlignmentOptions::broadcast) &&
      source_rank != target_rank) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Aligning source domain of rank ", source_rank,
        " to target domain of rank ", target_rank, " requires broadcasting"));
  }
  assert(source_matches.size() == source_rank);
  const auto source_labels = source.labels();
  const auto target_labels = target.labels();
  if (!(options & DomainAlignmentOptions::permute) ||
      internal_index_space::IsUnlabeled(source_labels) ||
      internal_index_space::IsUnlabeled(target_labels)) {
    // Positional matching: align the last `match_rank` dimensions of source
    // with the last `match_rank` dimensions of target; leading source
    // dimensions (if any) are unmatched.
    const DimensionIndex match_rank = std::min(source_rank, target_rank);
    const DimensionIndex source_match_start = source_rank - match_rank;
    const DimensionIndex target_match_start = target_rank - match_rank;
    std::fill_n(source_matches.begin(), source_match_start, DimensionIndex(-1));
    std::iota(source_matches.begin() + source_match_start, source_matches.end(),
              target_match_start);
  } else {
    // Label-based matching, scanning source dimensions right-to-left.
    // Unlabeled source dimensions consume unlabeled target dimensions,
    // also right-to-left.
    DimensionIndex next_potentially_unlabeled_target = target_rank - 1;
    for (DimensionIndex i = source_rank - 1; i >= 0; --i) {
      std::string_view source_label = source_labels[i];
      DimensionIndex j;
      if (source_label.empty()) {
        // Find the next unlabeled target dimension, or -1 if exhausted.
        while (true) {
          if (next_potentially_unlabeled_target < 0) {
            j = -1;
            break;
          }
          if (target_labels[next_potentially_unlabeled_target].empty()) {
            j = next_potentially_unlabeled_target--;
            break;
          }
          --next_potentially_unlabeled_target;
        }
      } else {
        // Find a target dimension with an identical label; -1 if none.
        for (j = target_rank - 1; j >= 0; --j) {
          if (target_labels[j] == source_label) break;
        }
      }
      source_matches[i] = j;
    }
  }
  // Validation phase: verify interval compatibility of each candidate match
  // and accumulate error text for violations `options` cannot excuse.
  std::string mismatch_error;
  const auto source_shape = source.shape();
  const auto target_shape = target.shape();
  for (DimensionIndex i = 0; i < source_rank; ++i) {
    DimensionIndex& j = source_matches[i];
    const DimensionIndex source_size = source_shape[i];
    if (j != -1) {
      // Without `translate` the whole intervals must be equal; with it, only
      // the sizes.
      if (!(options & DomainAlignmentOptions::translate)
              ? source[i] != target[j]
              : source_size != target_shape[j]) {
        // A size-1 source dimension may be broadcast instead of erroring.
        if (!(options & DomainAlignmentOptions::broadcast) ||
            source_size != 1) {
          tensorstore::StrAppend(&mismatch_error, "source dimension ", i, " ",
                                 source[i], " mismatch with target dimension ",
                                 j, " ", target[j], ", ");
        }
        j = -1;
      }
    } else {
      // Unmatched source dimension: allowed only with `broadcast`, and only
      // when its size is 1.
      if (!(options & DomainAlignmentOptions::broadcast)) {
        tensorstore::StrAppend(&mismatch_error, "unmatched source dimension ",
                               i, " ", source[i], ", ");
      }
      if (source_size != 1) {
        tensorstore::StrAppend(&mismatch_error, "unmatched source dimension ",
                               i, " ", source[i],
                               " does not have a size of 1, ");
      }
    }
  }
  if (!mismatch_error.empty()) {
    // Drop the trailing ", " separator before reporting.
    mismatch_error.resize(mismatch_error.size() - 2);
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Error aligning dimensions: ", mismatch_error));
  }
  return absl::OkStatus();
}
/// Returns a transform whose input domain is `target` and whose output rank
/// equals `source.rank()`, mapping `target` coordinates to `source`
/// coordinates per the dimension matching computed by `AlignDimensionsTo`.
///
/// Matched source dimension `i` (matching target dimension `j`) becomes a
/// `single_input_dimension` map with stride 1 and offset
/// `source.origin()[i] - target.origin()[j]`; an unmatched source dimension
/// becomes a constant map fixed at `source.origin()[i]`.
Result<IndexTransform<>> AlignDomainTo(IndexDomainView<> source,
                                       IndexDomainView<> target,
                                       DomainAlignmentOptions options) {
  using internal_index_space::TransformAccess;
  assert(source.valid());
  assert(target.valid());
  const DimensionIndex source_rank = source.rank();
  // source_matches[i] is the target dimension matching source dim i, or -1.
  DimensionIndex source_matches[kMaxRank];
  TENSORSTORE_RETURN_IF_ERROR(AlignDimensionsTo(
      source, target, span(source_matches).first(source_rank), options));
  const DimensionIndex target_rank = target.rank();
  // Build the alignment transform directly on the internal representation:
  // input domain copied from `target`, one output map per source dimension.
  auto alignment =
      internal_index_space::TransformRep::Allocate(target_rank, source_rank);
  CopyTransformRepDomain(TransformAccess::rep(target), alignment.get());
  alignment->output_rank = source_rank;
  const auto maps = alignment->output_index_maps();
  span<const Index> source_origin = source.origin();
  span<const Index> target_origin = target.origin();
  for (DimensionIndex i = 0; i < source_rank; ++i) {
    auto& map = maps[i];
    const DimensionIndex j = source_matches[i];
    const Index source_origin_value = source_origin[i];
    if (j == -1) {
      // Unmatched (broadcast) dimension: pin to the source origin.
      map.SetConstant();
      map.offset() = source_origin_value;
      map.stride() = 0;
    } else {
      // Matched dimension: unit-stride translation from target to source.
      map.SetSingleInputDimension(j);
      map.offset() = source_origin_value - target_origin[j];
      map.stride() = 1;
    }
  }
  internal_index_space::DebugCheckInvariants(alignment.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(alignment));
}
/// Aligns `source_transform` to `target`: computes the alignment transform
/// from `source_transform`'s domain to `target` and composes it with
/// `source_transform`.  Errors from the alignment step are propagated.
Result<IndexTransform<>> AlignTransformTo(IndexTransform<> source_transform,
                                          IndexDomainView<> target,
                                          DomainAlignmentOptions options) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto align_to_target,
      AlignDomainTo(source_transform.domain(), target, options));
  return ComposeTransforms(source_transform, align_to_target);
}
} | #include "tensorstore/index_space/alignment.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MatchesStatus;
using Dao = tensorstore::DomainAlignmentOptions;
TEST(AlignDimensionsToTest, AllUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({2, 0, 6})
.exclusive_max({6, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, source, source_matches, Dao::none));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, 1, 2));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(
AlignDimensionsTo(source, target, source_matches, Dao::translate),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 1 \\[5, 6\\) mismatch with target "
"dimension 1 \\[0, 4\\)"));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(
AlignDimensionsTo(source, target, source_matches, Dao::broadcast),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 0 \\[3, 7\\) mismatch with target "
"dimension 0 \\[2, 6\\), "
"source dimension 2 \\[4, 10\\) mismatch with target "
"dimension 2 \\[6, 12\\)"));
}
}
TEST(AlignDimensionsToTest, MismatchedLabelsNoPermute) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"a", "b", "c"})
.origin({2, 0, 6})
.exclusive_max({6, 4, 12})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches,
Dao::translate | Dao::broadcast));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, Dao::all),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"unmatched source dimension 0 \"x\": \\[3, 7\\) "
"does not have a size of 1, "
"unmatched source dimension 2 \"z\": \\[4, 10\\) "
"does not have a size of 1"));
}
TEST(AlignDimensionsToTest, SourceUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({4, 0, 6})
.labels({"x", "y", "z"})
.exclusive_max({8, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
}
TEST(AlignDimensionsToTest, TargetUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.labels({"x", "y", "z"})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({4, 0, 6})
.exclusive_max({8, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
}
TEST(AlignDimensionsToTest, AllLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "x", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, -1, 0));
}
// Source and target differ only by a label permutation (same bounds per
// label).  Any option set including Dao::permute succeeds with matches
// (1, 2, 0); every option set without permute fails with InvalidArgument.
TEST(AlignDimensionsToTest, AllLabeledPermuteOnly) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", "z"})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 6, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"z", "x", "y"})
                    .origin({4, 3, 5})
                    .exclusive_max({10, 7, 6})
                    .Finalize()
                    .value();
  for (auto options : {Dao::permute, Dao::permute | Dao::translate,
                       Dao::permute | Dao::broadcast, Dao::all}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_EQ(absl::OkStatus(),
              AlignDimensionsTo(source, target, source_matches, options));
    EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, 0));
  }
  for (auto options : {Dao::none, Dao::translate, Dao::broadcast,
                       Dao::translate | Dao::broadcast}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, options),
                MatchesStatus(absl::StatusCode::kInvalidArgument,
                              "Error aligning dimensions: .*"));
  }
}
// Alignment requires both a label permutation and a translation (origins
// differ per label); the default options succeed with matches (1, 2, 0).
TEST(AlignDimensionsToTest, AllLabeledPermuteTranslateOnly) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", "z"})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 9, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"z", "x", "y"})
                    .origin({6, 4, 0})
                    .exclusive_max({12, 8, 4})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_EQ(absl::OkStatus(),
            AlignDimensionsTo(source, target, source_matches));
  EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, 0));
}
// Mixed labeled/unlabeled dims against a higher-rank target: labeled dims
// match by label, the unlabeled source dim matches positionally, and the
// size-1 "y" dim is -1.  With Dao::none the rank change itself is rejected
// because it requires broadcasting.
TEST(AlignDimensionsToTest, PartiallyLabeled) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", ""})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 6, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(4)
                    .labels({"", "", "x", "y"})
                    .origin({0, 6, 4, 0})
                    .exclusive_max({10, 12, 8, 4})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_EQ(absl::OkStatus(),
            AlignDimensionsTo(source, target, source_matches));
  EXPECT_THAT(source_matches, ::testing::ElementsAre(2, -1, 1));
  EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, Dao::none),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Aligning source domain of rank 3 to target "
                            "domain of rank 4 requires broadcasting"));
}
// Same domains as AlignDimensionsToTest.PartiallyLabeled, but verifies the
// concrete alignment transform returned by AlignDomainTo: source dim 0
// comes from target dim 2 (offset -1), the broadcast dim 1 is pinned to
// its origin (constant 5), and source dim 2 comes from target dim 1
// (offset -2).
TEST(AlignDomainToTest, PartiallyLabeled) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", ""})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 6, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(4)
                    .labels({"", "", "x", "y"})
                    .origin({0, 6, 4, 0})
                    .exclusive_max({10, 12, 8, 4})
                    .Finalize()
                    .value();
  IndexTransform<> alignment = IndexTransformBuilder<>(4, 3)
                                   .input_domain(target)
                                   .output_single_input_dimension(0, -1, 1, 2)
                                   .output_constant(1, 5)
                                   .output_single_input_dimension(2, -2, 1, 1)
                                   .Finalize()
                                   .value();
  EXPECT_EQ(alignment, AlignDomainTo(source, target));
}
// Rank-2 source aligned to a rank-3 target whose trailing dims have the
// same bounds: only broadcasting of the leading target dim is needed.
// Succeeds whenever Dao::broadcast is included; fails otherwise because
// the rank change requires broadcasting.
TEST(AlignDimensionsToTest, BroadcastOnly) {
  auto source = IndexDomainBuilder(2)
                    .origin({2, 3})
                    .exclusive_max({5, 6})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .origin({1, 2, 3})
                    .exclusive_max({4, 5, 6})
                    .Finalize()
                    .value();
  for (auto options : {Dao::broadcast, Dao::broadcast | Dao::translate,
                       Dao::broadcast | Dao::permute, Dao::all}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_EQ(absl::OkStatus(),
              AlignDimensionsTo(source, target, source_matches, options));
    EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2));
  }
  for (auto options : {Dao::none, Dao::permute, Dao::translate,
                       Dao::permute | Dao::translate}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, options),
                MatchesStatus(absl::StatusCode::kInvalidArgument,
                              "Aligning source domain of rank 2 to target "
                              "domain of rank 3 requires broadcasting"));
  }
}
// Label "x" matches target dim 0; size-1 dim "y" has no target counterpart
// and must be broadcast (-1).  Option sets without Dao::broadcast report
// the unmatched "y" dimension as an error.
TEST(AlignDimensionsToTest, PermuteAndBroadcast) {
  auto source = IndexDomainBuilder(2)
                    .origin({2, 3})
                    .exclusive_max({5, 4})
                    .labels({"x", "y"})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(2)
                    .origin({2, 5})
                    .exclusive_max({5, 10})
                    .labels({"x", "z"})
                    .Finalize()
                    .value();
  for (auto options : {Dao::permute | Dao::broadcast, Dao::all}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_EQ(absl::OkStatus(),
              AlignDimensionsTo(source, target, source_matches, options));
    EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1));
  }
  for (auto options : {Dao::permute, Dao::permute | Dao::translate}) {
    std::vector<DimensionIndex> source_matches(source.rank());
    EXPECT_THAT(
        AlignDimensionsTo(source, target, source_matches, options),
        MatchesStatus(absl::StatusCode::kInvalidArgument,
                      "Error aligning dimensions: "
                      "unmatched source dimension 1 \"y\": \\[3, 4\\)"));
  }
}
// A size-1 unlabeled source dim (dim 2, [7, 8)) is left unmatched (-1);
// labeled dims match by label and the remaining unlabeled dim matches
// positionally with target dim 0.
TEST(AlignDimensionsToTest, UnmatchedUnlabeledSourceDimension) {
  auto source = IndexDomainBuilder(4)
                    .labels({"x", "y", "", ""})
                    .origin({3, 5, 7, 4})
                    .exclusive_max({7, 9, 8, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"", "x", "y"})
                    .origin({0, 4, 0})
                    .exclusive_max({6, 8, 4})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_EQ(absl::OkStatus(),
            AlignDimensionsTo(source, target, source_matches));
  EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, -1, 0));
}
// Source label "x" does not occur in the target and its extent [3, 7) is
// not size 1, so it can be neither matched nor broadcast -> error.
TEST(AlignDimensionsToTest, MismatchedLabeled) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", "z"})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 6, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"z", "w", "y"})
                    .origin({6, 4, 0})
                    .exclusive_max({12, 8, 4})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error aligning dimensions: "
                            "unmatched source dimension 0 \"x\": \\[3, 7\\) "
                            "does not have a size of 1"));
}
// Same domains as AlignDimensionsToTest.MismatchedLabeled; verifies that
// AlignDomainTo propagates the alignment failure as InvalidArgument.
TEST(AlignDomainToTest, MismatchedLabeled) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", "z"})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 6, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"z", "w", "y"})
                    .origin({6, 4, 0})
                    .exclusive_max({12, 8, 4})
                    .Finalize()
                    .value();
  EXPECT_THAT(AlignDomainTo(source, target),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Dim "y" matches by label but its size (2) differs from the target's "y"
// size (4) and is not 1, so alignment fails with a size-mismatch error.
TEST(AlignDimensionsToTest, MismatchedSizeLabeled) {
  auto source = IndexDomainBuilder(3)
                    .labels({"x", "y", "z"})
                    .origin({3, 5, 4})
                    .exclusive_max({7, 7, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .labels({"z", "x", "y"})
                    .origin({6, 4, 0})
                    .exclusive_max({12, 8, 4})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error aligning dimensions: "
                            "source dimension 1 \"y\": \\[5, 7\\) mismatch "
                            "with target dimension 2 \"y\": \\[0, 4\\)"));
}
// Unlabeled variant of the size-mismatch case: positionally matched dim 1
// has size 2 vs. 4 and is not size 1 -> error (message omits labels).
TEST(AlignDimensionsToTest, MismatchedSizeUnlabeled) {
  auto source = IndexDomainBuilder(3)
                    .origin({3, 5, 4})
                    .exclusive_max({7, 7, 10})
                    .Finalize()
                    .value();
  auto target = IndexDomainBuilder(3)
                    .origin({4, 0, 6})
                    .exclusive_max({8, 4, 12})
                    .Finalize()
                    .value();
  std::vector<DimensionIndex> source_matches(source.rank());
  EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error aligning dimensions: "
                            "source dimension 1 \\[5, 7\\) mismatch with "
                            "target dimension 1 \\[0, 4\\)"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/alignment.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/alignment_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
458c05c5-40e1-494f-bb6d-f746c1850d3f | cpp | google/tensorstore | identity_transform | tensorstore/index_space/internal/identity_transform.cc | tensorstore/index_space/identity_transform_test.cc | #include "tensorstore/index_space/internal/identity_transform.h"
namespace tensorstore {
namespace internal_index_space {
/// Sets `maps` to the identity mapping: output dimension `i` becomes a
/// single_input_dimension map of input dimension `i` with offset 0 and
/// stride 1.
void SetToIdentityTransform(span<OutputIndexMap> maps) {
  DimensionIndex input_dim = 0;
  for (auto& map : maps) {
    map.SetSingleInputDimension(input_dim);
    map.offset() = 0;
    map.stride() = 1;
    ++input_dim;
  }
}
namespace {
// Sets the input domain of `data` to the unbounded rank-`rank` domain:
// every dimension spans [-kInfIndex, +kInfIndex) with both bounds marked
// implicit.  Requires `data->input_rank_capacity >= rank`.
void SetUnboundedDomain(TransformRep* data, DimensionIndex rank) {
  assert(data->input_rank_capacity >= rank);
  data->input_rank = rank;
  std::fill_n(data->input_origin().begin(), rank, -kInfIndex);
  std::fill_n(data->input_shape().begin(), rank, kInfSize);
  // Mark all `rank` lower and upper bounds implicit.
  const auto mask = DimensionSet::UpTo(rank);
  data->implicit_lower_bounds = mask;
  data->implicit_upper_bounds = mask;
}
// Sets the output index maps of `data` to the rank-`rank` identity, or, if
// `domain_only` is true, leaves no output dimensions at all
// (`output_rank == 0`).  Requires `output_rank_capacity >= rank` unless
// `domain_only`.
void SetIdentityOutputOrDomainOnly(TransformRep* data, DimensionIndex rank,
                                   bool domain_only) {
  if (domain_only) {
    data->output_rank = 0;
  } else {
    assert(data->output_rank_capacity >= rank);
    data->output_rank = rank;
    SetToIdentityTransform(data->output_index_maps().first(rank));
  }
}
// Initializes `data` as the identity transform over the unbounded
// rank-`rank` domain (domain only, with no output maps, if `domain_only`).
void SetToIdentityTransform(TransformRep* data, DimensionIndex rank,
                            bool domain_only) {
  SetUnboundedDomain(data, rank);
  SetIdentityOutputOrDomainOnly(data, rank, domain_only);
}
}
// Allocates and returns a new identity transform of the given `rank` over
// the unbounded domain.  If `domain_only`, the transform has output rank 0.
TransformRep::Ptr<> MakeIdentityTransform(DimensionIndex rank,
                                          bool domain_only) {
  auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
  SetToIdentityTransform(data.get(), rank, domain_only);
  internal_index_space::DebugCheckInvariants(data.get());
  return data;
}
// Like MakeIdentityTransform(rank), but the rank is taken from
// `labels.size()` and each input dimension is assigned the corresponding
// label (copied into the transform's own storage).
TransformRep::Ptr<> MakeIdentityTransform(internal::StringLikeSpan labels,
                                          bool domain_only) {
  const DimensionIndex rank = labels.size();
  auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
  SetToIdentityTransform(data.get(), rank, domain_only);
  span<std::string> input_labels = data->input_labels().first(rank);
  for (DimensionIndex i = 0; i < rank; ++i) {
    std::string_view label = labels[i];
    input_labels[i].assign(label.data(), label.size());
  }
  internal_index_space::DebugCheckInvariants(data.get());
  return data;
}
// Returns a new identity transform whose input domain (bounds, implicit
// flags, labels) is copied from `data`; the output maps (unless
// `domain_only`) are the identity over that domain.
TransformRep::Ptr<> MakeIdentityTransformLike(TransformRep* data,
                                              bool domain_only) {
  assert(data != nullptr);
  const DimensionIndex rank = data->input_rank;
  auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
  CopyTransformRepDomain(data, result.get());
  SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
  internal_index_space::DebugCheckInvariants(result.get());
  return result;
}
// Returns the identity transform over the zero-origin domain of the given
// `shape`.  All bounds are explicit.
TransformRep::Ptr<> MakeIdentityTransform(span<const Index> shape,
                                          bool domain_only) {
  const DimensionIndex rank = shape.size();
  auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
  result->input_rank = rank;
  std::fill_n(result->input_origin().begin(), rank, 0);
  std::copy_n(shape.begin(), rank, result->input_shape().begin());
  result->implicit_lower_bounds = false;
  result->implicit_upper_bounds = false;
  SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
  internal_index_space::DebugCheckInvariants(result.get());
  return result;
}
// Returns the identity transform over the domain given by `domain` (origin
// and shape copied from the box).  All bounds are explicit.
TransformRep::Ptr<> MakeIdentityTransform(BoxView<> domain, bool domain_only) {
  const DimensionIndex rank = domain.rank();
  auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
  result->input_rank = rank;
  result->input_domain(rank).DeepAssign(domain);
  result->implicit_lower_bounds = false;
  result->implicit_upper_bounds = false;
  SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
  internal_index_space::DebugCheckInvariants(result.get());
  return result;
}
}
} | #include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Box;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
// IdentityTransform<2>() yields a statically-ranked IndexTransform<2, 2>
// with an unbounded, fully implicit domain and identity output maps.
TEST(IdentityTransformTest, Static) {
  auto t = IdentityTransform<2>();
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain(t.input_rank());
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
// IdentityTransform(2) (runtime rank) yields a dynamically-ranked
// IndexTransform<> equal to the static version.
TEST(IdentityTransformTest, Dynamic) {
  auto t = IdentityTransform(2);
  static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain(t.input_rank());
  static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
  EXPECT_EQ(t.domain(), d);
}
// A braced list of C-string labels produces a statically-ranked identity
// transform with those input labels.
TEST(IdentityTransformTest, LabeledCString) {
  auto t = IdentityTransform({"x", "y"});
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .input_labels({"x", "y"})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain({"x", "y"});
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
// Same as LabeledCString but with std::string labels.
TEST(IdentityTransformTest, LabeledStdString) {
  auto t = IdentityTransform({std::string("x"), std::string("y")});
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .input_labels({"x", "y"})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain({std::string("x"), std::string("y")});
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
// Same as LabeledCString but with std::string_view labels.
// NOTE(review): the suite name here is "IndexTransformTest", unlike the
// sibling "IdentityTransformTest" cases — possibly unintentional; renaming
// would change the test's identity, so only flagging it.
TEST(IndexTransformTest, LabeledStringView) {
  auto t = IdentityTransform({std::string_view("x"), std::string_view("y")});
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .input_labels({"x", "y"})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain({std::string_view("x"), std::string_view("y")});
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
// IdentityTransformLike() on an existing transform keeps its input domain
// (bounds, implicit flags, labels) but replaces all output maps with the
// identity; the original 2->3 output maps are discarded.
TEST(IdentityTransformLikeTest, IndexTransform) {
  EXPECT_EQ((IndexTransformBuilder<2, 2>()
                 .input_origin({1, 2})
                 .input_shape({3, 4})
                 .implicit_lower_bounds({0, 1})
                 .implicit_upper_bounds({1, 0})
                 .input_labels({"x", "y"})
                 .output_single_input_dimension(0, 0)
                 .output_single_input_dimension(1, 1)
                 .Finalize()
                 .value()),
            IdentityTransformLike(IndexTransformBuilder<2, 3>()
                                      .input_origin({1, 2})
                                      .input_shape({3, 4})
                                      .implicit_lower_bounds({0, 1})
                                      .implicit_upper_bounds({1, 0})
                                      .input_labels({"x", "y"})
                                      .output_single_input_dimension(0, 5, 7, 1)
                                      .output_single_input_dimension(1, 6, 8, 0)
                                      .output_single_input_dimension(2, 7, 9, 0)
                                      .Finalize()
                                      .value()));
}
// IdentityTransformLike() on an array yields the zero-origin identity
// transform over the array's shape.
TEST(IdentityTransformLikeTest, Array) {
  auto array = AllocateArray<float>({3, 5});
  auto expected = IndexTransformBuilder<2, 2>()
                      .input_origin({0, 0})
                      .input_shape({3, 5})
                      .output_single_input_dimension(0, 0)
                      .output_single_input_dimension(1, 1)
                      .Finalize()
                      .value();
  EXPECT_EQ(expected, IdentityTransformLike(array));
}
// IdentityTransform(box) yields a statically-ranked identity transform
// whose (explicit) domain equals the box; also exercises GetBoxDomainOf
// and the matching IndexDomain construction.
TEST(IdentityTransformTest, StaticBox) {
  auto box = Box({1, 2}, {3, 4});
  auto t = IdentityTransform(box);
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .input_origin({1, 2})
                .input_shape({3, 4})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  EXPECT_EQ(box, t.domain().box());
  static_assert(tensorstore::HasBoxDomain<IndexTransform<2, 2>>);
  EXPECT_EQ(box, GetBoxDomainOf(t));
  auto d = IndexDomain(box);
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
// Same as StaticBox, but with a dynamically-ranked Box<> producing a
// dynamically-ranked IndexTransform<>.
TEST(IdentityTransformTest, DynamicBox) {
  auto t = IdentityTransform(Box<>({1, 2}, {3, 4}));
  static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .input_origin({1, 2})
                .input_shape({3, 4})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain(Box<>({1, 2}, {3, 4}));
  static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
  EXPECT_EQ(t.domain(), d);
}
// IdentityTransform(span of extents) yields a zero-origin identity
// transform over that shape, with matching IndexDomain construction.
TEST(IdentityTransformTest, FromShape) {
  auto transform = IdentityTransform(span<const Index, 2>({2, 3}));
  static_assert(std::is_same_v<decltype(transform), IndexTransform<2, 2>>);
  auto expected = IndexTransformBuilder<>(2, 2)
                      .input_origin({0, 0})
                      .input_shape({2, 3})
                      .output_single_input_dimension(0, 0)
                      .output_single_input_dimension(1, 1)
                      .Finalize()
                      .value();
  EXPECT_EQ(expected, transform);
  auto domain = IndexDomain(span<const Index, 2>({2, 3}));
  static_assert(std::is_same_v<decltype(domain), IndexDomain<2>>);
  EXPECT_EQ(transform.domain(), domain);
}
// Same as FromShape, but using a braced list of extents directly.
TEST(IdentityTransformTest, FromShapeBracedList) {
  auto t = IdentityTransform({2, 3});
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .input_origin({0, 0})
                .input_shape({2, 3})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  auto d = IndexDomain({2, 3});
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/identity_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/identity_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7c56682b-83cb-4b2e-b235-8488d234e334 | cpp | google/tensorstore | transpose_op | tensorstore/index_space/internal/transpose_op.cc | tensorstore/index_space/transpose_op_test.cc | #include "tensorstore/index_space/internal/transpose_op.h"
#include <cassert>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transpose.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Computes a full input-dimension permutation that moves the selected
// `dimensions` so that they occupy consecutive positions starting at
// `target`, preserving the relative order of the unselected dimensions.
// `permutation` must have length equal to the input rank.  On success,
// `*dimensions` is updated to the new (post-move) dimension indices.
absl::Status MakePermutationFromMoveDimsTarget(
    DimensionIndexBuffer* dimensions, DimensionIndex target,
    span<DimensionIndex> permutation) {
  // Empty selection: identity permutation.
  if (dimensions->empty()) {
    std::iota(permutation.begin(), permutation.end(),
              static_cast<DimensionIndex>(0));
    return absl::OkStatus();
  }
  const DimensionIndex input_rank = permutation.size();
  const DimensionIndex num_dims = dimensions->size();
  // `target` (possibly negative) is normalized against the
  // `input_rank - num_dims + 1` valid starting positions for the moved
  // block.
  TENSORSTORE_ASSIGN_OR_RETURN(
      target, NormalizeDimensionIndex(target, input_rank - num_dims + 1));
  // -1 marks permutation slots not yet assigned.
  std::fill(permutation.begin(), permutation.end(),
            static_cast<DimensionIndex>(-1));
  DimensionSet moved_dims = false;
  // Place the selected dims at consecutive positions starting at `target`,
  // updating the selection to the new indices.
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    DimensionIndex& input_dim = (*dimensions)[i];
    moved_dims[input_dim] = true;
    permutation[target + i] = input_dim;
    input_dim = target + i;
  }
  // Fill the remaining slots with the unmoved dimensions in their original
  // relative order.
  for (DimensionIndex i = 0, orig_input_dim = 0; i < input_rank; ++i) {
    if (permutation[i] != -1) continue;
    while (moved_dims[orig_input_dim]) ++orig_input_dim;
    permutation[i] = orig_input_dim++;
  }
  return absl::OkStatus();
}
}
// Implements DimExpression::MoveTo: moves the selected dimensions so they
// start at `target`, applying the resulting permutation of input
// dimensions to `transform`.  `*dimensions` is updated to the new indices
// of the moved dimensions.
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
                                         DimensionIndexBuffer* dimensions,
                                         DimensionIndex target,
                                         bool domain_only) {
  const DimensionIndex input_rank = transform.input_rank();
  DimensionIndex permutation[kMaxRank];
  TENSORSTORE_RETURN_IF_ERROR(MakePermutationFromMoveDimsTarget(
      dimensions, target, span<DimensionIndex>(&permutation[0], input_rank)));
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
// Implements DimExpression::Transpose() for a full dimension selection:
// `*dimensions` must list every input dimension (else InvalidArgument) and
// is used directly as the permutation.  Afterwards the selection is reset
// to the identity {0, 1, ..., rank-1}, since the selected dims now occupy
// those positions.
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        bool domain_only) {
  if (static_cast<DimensionIndex>(dimensions->size()) !=
      transform.input_rank()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of dimensions (", dimensions->size(),
        ") must equal input_rank (", transform.input_rank(), ")."));
  }
  TransformRep::Ptr<> rep = TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)), *dimensions,
      domain_only);
  std::iota(dimensions->begin(), dimensions->end(),
            static_cast<DimensionIndex>(0));
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
// Implements DimExpression::Transpose(target_dimensions): builds a full
// permutation that sends each selected dimension to the corresponding
// (normalized) target position, rejecting duplicate targets, and fills the
// remaining positions with the unselected dimensions in their original
// relative order.  `*dimensions` is updated to the target positions.
Result<IndexTransform<>> ApplyTransposeTo(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    span<const DimensionIndex> target_dimensions, bool domain_only) {
  const DimensionIndex input_rank = transform.input_rank();
  if (static_cast<DimensionIndex>(dimensions->size()) !=
      target_dimensions.size()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of selected dimensions (", dimensions->size(),
        ") must equal number of target dimensions (", target_dimensions.size(),
        ")"));
  }
  // Tracks which original dims were selected; -1 marks unassigned slots.
  DimensionSet seen_existing_dim = false;
  DimensionIndex permutation[kMaxRank];
  std::fill_n(permutation, input_rank, -1);
  for (DimensionIndex i = 0; i < target_dimensions.size(); ++i) {
    DimensionIndex& orig_dim = (*dimensions)[i];
    TENSORSTORE_ASSIGN_OR_RETURN(
        const DimensionIndex target_dim,
        NormalizeDimensionIndex(target_dimensions[i], input_rank));
    if (permutation[target_dim] != -1) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Target dimension ", target_dim, " occurs more than once"));
    }
    seen_existing_dim[orig_dim] = true;
    permutation[target_dim] = orig_dim;
    orig_dim = target_dim;
  }
  // Fill unassigned target positions with the unselected dims in order.
  for (DimensionIndex orig_dim = 0, target_dim = 0; orig_dim < input_rank;
       ++orig_dim) {
    if (seen_existing_dim[orig_dim]) continue;
    while (permutation[target_dim] != -1) ++target_dim;
    permutation[target_dim] = orig_dim;
  }
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
// Dispatches a dynamic target-dimension specification: a single bare index
// is treated as a MoveTo target; otherwise indices and DimRangeSpecs are
// expanded into an explicit target list (labels are rejected for targets)
// and forwarded to ApplyTransposeTo.
Result<IndexTransform<>> ApplyTransposeToDynamic(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    span<const DynamicDimSpec> target_dim_specs, bool domain_only) {
  if (target_dim_specs.size() == 1) {
    if (auto* target = std::get_if<DimensionIndex>(&target_dim_specs.front())) {
      return ApplyMoveDimsTo(std::move(transform), dimensions, *target,
                             domain_only);
    }
  }
  DimensionIndexBuffer target_dimensions;
  const DimensionIndex input_rank = transform.input_rank();
  for (const auto& s : target_dim_specs) {
    if (auto* index = std::get_if<DimensionIndex>(&s)) {
      target_dimensions.push_back(*index);
    } else if (auto* r = std::get_if<DimRangeSpec>(&s)) {
      TENSORSTORE_RETURN_IF_ERROR(
          NormalizeDimRangeSpec(*r, input_rank, &target_dimensions));
    } else {
      return absl::InvalidArgumentError(
          "Target dimensions cannot be specified by label");
    }
  }
  return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
                          domain_only);
}
// Transposes `transform` according to a dynamic source-dimension
// specification, which after normalization must be a complete permutation
// of [0, input_rank); otherwise InvalidArgument is returned.
Result<IndexTransform<>> ApplyTranspose(
    IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
    bool domain_only) {
  DimensionIndexBuffer source_dimensions;
  source_dimensions.reserve(transform.input_rank());
  TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpecs(
      source_dim_specs, transform.input_labels(), &source_dimensions));
  if (!IsValidPermutation(source_dimensions)) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Source dimension list ", span(source_dimensions),
                            " is not a valid dimension permutation for rank ",
                            transform.input_rank()));
  }
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      source_dimensions, domain_only));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
// Basic Transpose() example: permutation (2, 0, 1) reorders origins,
// shapes, implicit-bound flags, and labels, and the output maps now read
// from the permuted input dims.  The same result is obtained selecting
// dimensions by label.
TEST(TransposeTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .implicit_lower_bounds({1, 0, 0})
                                      .implicit_upper_bounds({0, 1, 0})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform = IndexTransformBuilder<3, 3>()
                                          .input_origin({3, 1, 2})
                                          .input_shape({2, 3, 4})
                                          .implicit_lower_bounds({0, 1, 0})
                                          .implicit_upper_bounds({0, 0, 1})
                                          .input_labels({"z", "x", "y"})
                                          .output_single_input_dimension(0, 1)
                                          .output_single_input_dimension(1, 2)
                                          .output_single_input_dimension(2, 0)
                                          .Finalize()
                                          .value();
  const EquivalentIndices equivalent_indices = {{{2, 3, 4}, {4, 2, 3}}};
  TestDimExpression(original_transform,
                    Dims(2, 0, 1).Transpose(),
                    {0, 1, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  TestDimExpression(original_transform,
                    Dims("z", "x", "y").Transpose(),
                    {0, 1, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// Transpose of a 4-d transform with both a single_input_dimension output
// map and an index-array output map: the array map's index array is
// transposed consistently with the input dimensions.
TEST(TransposeTest, Simple) {
  TestDimExpression(
      IndexTransformBuilder<4, 2>()
          .input_origin({1, 2, 3, 4})
          .input_shape({5, 6, 4, 8})
          .output_single_input_dimension(0, 1, 2, 1)
          .output_index_array(
              1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      Dims(2, 0, 1, 3).Transpose(),
      {0, 1, 2, 3},
      IndexTransformBuilder<4, 4>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 2)
          .output_single_input_dimension(2, 0)
          .output_single_input_dimension(3, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 2>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .output_single_input_dimension(0, 1, 2, 2)
          .output_index_array(
              1, 2, 3,
              MakeArray<Index>(
                  {{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      {{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
// Transposing a transform with only constant output maps permutes the
// domain but leaves the constant outputs unchanged.
TEST(TransposeTest, Constant) {
  TestDimExpression(IndexTransformBuilder<2, 2>()
                        .input_origin({1, 2})
                        .input_shape({5, 6})
                        .output_constant(0, 1)
                        .output_constant(1, 2)
                        .Finalize()
                        .value(),
                    Dims(1, 0).Transpose(),
                    {0, 1},
                    IndexTransformBuilder<2, 2>()
                        .input_origin({2, 1})
                        .input_shape({6, 5})
                        .output_single_input_dimension(0, 1)
                        .output_single_input_dimension(1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<2, 2>()
                        .input_origin({2, 1})
                        .input_shape({6, 5})
                        .output_constant(0, 1)
                        .output_constant(1, 2)
                        .Finalize()
                        .value(),
                    {});
}
// Transpose() with a partial dimension selection is rejected: the
// selection must cover every input dimension.
// NOTE(review): TestDimExpressionError is used unqualified but no
// using-declaration for it is visible in this file's preamble — presumably
// it resolves via the dim_expression_testutil header; verify.
TEST(TransposeTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<>(2, 2)
          .input_origin({1, 2})
          .input_shape({5, 6})
          .output_constant(0, 1)
          .output_constant(1, 2)
          .Finalize()
          .value(),
      Dims(1).Transpose(), absl::StatusCode::kInvalidArgument,
      "Number of dimensions \\(1\\) must equal input_rank \\(2\\)\\.");
}
// Same as Simple, but with labeled input dimensions: labels are permuted
// along with the bounds.
TEST(TransposeTest, Labeled) {
  TestDimExpression(
      IndexTransformBuilder<4, 2>()
          .input_origin({1, 2, 3, 4})
          .input_shape({5, 6, 4, 8})
          .input_labels({"a", "b", "c", "d"})
          .output_single_input_dimension(0, 1, 2, 1)
          .output_index_array(
              1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      Dims(2, 0, 1, 3).Transpose(),
      {0, 1, 2, 3},
      IndexTransformBuilder<4, 4>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .input_labels({"c", "a", "b", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 2)
          .output_single_input_dimension(2, 0)
          .output_single_input_dimension(3, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 2>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .input_labels({"c", "a", "b", "d"})
          .output_single_input_dimension(0, 1, 2, 2)
          .output_index_array(
              1, 2, 3,
              MakeArray<Index>(
                  {{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      {{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transpose_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transpose_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
31b38434-5c80-41ae-9aa3-f9777e02235e | cpp | google/tensorstore | dimension_selection | tensorstore/index_space/internal/dimension_selection.cc | tensorstore/index_space/dimension_selection_test.cc | #include "tensorstore/index_space/internal/dimension_selection.h"
#include <numeric>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
// Validates a dimension selection against `input_rank` and normalizes each
// entry in place (e.g. resolving negative indices via
// NormalizeDimensionIndex).  Fails if the selection is larger than the
// rank, any index is out of range, or any dimension appears more than
// once; all duplicates are collected into a single error message.
absl::Status CheckAndNormalizeDimensions(DimensionIndex input_rank,
                                         span<DimensionIndex> dimensions) {
  if (dimensions.size() > input_rank) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Number of dimensions (", dimensions.size(),
                            ") exceeds input rank (", input_rank, ")."));
  }
  std::vector<DimensionIndex> error_dimensions;
  for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        const DimensionIndex dim,
        NormalizeDimensionIndex(dimensions[i], input_rank));
    dimensions[i] = dim;
    // Quadratic duplicate scan; fine for the small ranks involved.  Note a
    // dim occurring k times is appended k-1 times to the error list.
    for (DimensionIndex j = 0; j < i; ++j) {
      if (dimensions[j] == dim) {
        error_dimensions.push_back(dim);
      }
    }
  }
  if (!error_dimensions.empty()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Input dimensions {", absl::StrJoin(error_dimensions, ", "),
        "} specified more than once"));
  }
  return absl::OkStatus();
}
// Copies `dimensions` into `*result`, then validates and normalizes the
// selection against `input_rank` in place.
absl::Status GetDimensions(DimensionIndex input_rank,
                           span<const DimensionIndex> dimensions,
                           DimensionIndexBuffer* result) {
  result->clear();
  result->insert(result->end(), dimensions.begin(), dimensions.end());
  return CheckAndNormalizeDimensions(input_rank, *result);
}
// Convenience overload: resolves the selection against the input rank of
// `transform`.
absl::Status GetDimensions(IndexTransformView<> transform,
                           span<const DimensionIndex> dimensions,
                           DimensionIndexBuffer* result) {
  const DimensionIndex input_rank = transform.input_rank();
  return GetDimensions(input_rank, dimensions, result);
}
// Resolves a selection of DimensionIdentifiers (indices or labels) against
// `transform`: labels are looked up in the transform's input labels, then
// the resulting indices are validated/normalized.
absl::Status GetDimensions(IndexTransformView<> transform,
                           span<const DimensionIdentifier> dimensions,
                           DimensionIndexBuffer* result) {
  const DimensionIndex input_rank = transform.input_rank();
  result->resize(dimensions.size());
  span<const std::string> input_labels = transform.input_labels();
  for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        (*result)[i],
        NormalizeDimensionIdentifier(dimensions[i], input_labels));
  }
  return CheckAndNormalizeDimensions(input_rank, *result);
}
// Resolves a selection of *new* dimension positions: the selection is
// validated against the post-insertion rank, `input_rank +
// dimensions.size()`, since each entry adds a dimension.
absl::Status GetNewDimensions(DimensionIndex input_rank,
                              span<const DimensionIndex> dimensions,
                              DimensionIndexBuffer* result) {
  return GetDimensions(input_rank + dimensions.size(), dimensions, result);
}
// Fills `*result` with the full selection {0, 1, ..., input_rank - 1}.
absl::Status GetAllDimensions(DimensionIndex input_rank,
                              DimensionIndexBuffer* result) {
  result->clear();
  result->reserve(input_rank);
  for (DimensionIndex i = 0; i < input_rank; ++i) {
    result->push_back(i);
  }
  return absl::OkStatus();
}
// Resolves a dynamic selection (indices, labels, and/or DimRangeSpecs)
// against `labels`, then validates/normalizes the expanded index list
// against the rank implied by `labels.size()`.
absl::Status GetDimensions(span<const std::string> labels,
                           span<const DynamicDimSpec> dimensions,
                           DimensionIndexBuffer* result) {
  result->clear();
  TENSORSTORE_RETURN_IF_ERROR(
      NormalizeDynamicDimSpecs(dimensions, labels, result));
  return CheckAndNormalizeDimensions(labels.size(), *result);
}
namespace {
// Computes how many new dimensions a DimRangeSpec denotes, without yet
// knowing the final rank.  This is only well-defined when the count is
// independent of the rank: both bounds present with the same sign and a
// direction consistent with `step`, or a single bound whose sign makes the
// distance to the (rank-dependent) open end fixed.  Otherwise the spec is
// rejected as ambiguous.
Result<DimensionIndex> GetNumNewDimensions(const DimRangeSpec& spec) {
  const DimensionIndex step = spec.step;
  if (step == 0) return absl::InvalidArgumentError("step must not be 0");
  if (spec.inclusive_start) {
    const DimensionIndex inclusive_start = *spec.inclusive_start;
    if (spec.exclusive_stop) {
      const DimensionIndex exclusive_stop = *spec.exclusive_stop;
      // Both bounds given: count is fixed iff they have the same sign and
      // the range runs in the direction of `step`.
      if ((exclusive_stop < 0) == (inclusive_start < 0) &&
          ((step > 0 && exclusive_stop >= inclusive_start) ||
           (step < 0 && exclusive_stop <= inclusive_start))) {
        return CeilOfRatio(*spec.exclusive_stop - inclusive_start, step);
      }
    } else if (step > 0) {
      // `start:` with positive step: fixed count only for negative start
      // (counting up to the end of the domain).
      if (inclusive_start < 0) {
        return CeilOfRatio(-inclusive_start, step);
      }
    } else {
      // `start:` with negative step: fixed count only for non-negative
      // start (counting down to the beginning).
      if (inclusive_start >= 0) {
        return CeilOfRatio(inclusive_start + 1, -step);
      }
    }
  } else if (spec.exclusive_stop) {
    const DimensionIndex exclusive_stop = *spec.exclusive_stop;
    if (step > 0) {
      // `:stop` with positive step: fixed count for non-negative stop.
      if (exclusive_stop >= 0) {
        return CeilOfRatio(exclusive_stop, step);
      }
    } else {
      // `:stop` with negative step: fixed count for negative stop.
      if (exclusive_stop < 0) {
        return CeilOfRatio(-(exclusive_stop + 1), -step);
      }
    }
  }
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "`", spec, "` is not a valid specification for new dimensions"));
}
}
// Resolves dynamic dim specs that designate *new* (to-be-added) dimensions.
//
// Two passes: the first computes the final rank (original rank plus the
// number of dimensions each spec adds), which is needed before any spec can
// be normalized; the second normalizes each spec against that final rank.
// Labels are rejected since a new dimension has no label to match.
absl::Status GetNewDimensions(DimensionIndex input_rank,
                              span<const DynamicDimSpec> dimensions,
                              DimensionIndexBuffer* result) {
  // Pass 1: determine the rank after all new dimensions are added.
  DimensionIndex new_rank = input_rank;
  for (const auto& spec : dimensions) {
    if (auto* r = std::get_if<DimRangeSpec>(&spec)) {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex x, GetNumNewDimensions(*r));
      new_rank += x;
    } else {
      // A plain index (or label, rejected below) adds exactly one dimension.
      new_rank += 1;
    }
  }
  result->clear();
  result->reserve(new_rank);
  // Pass 2: normalize each spec against the final rank `new_rank`.
  struct Visitor {
    DimensionIndex new_rank;
    DimensionIndexBuffer* result;
    absl::Status operator()(DimensionIndex i) const {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
                                   NormalizeDimensionIndex(i, new_rank));
      result->push_back(index);
      return absl::OkStatus();
    }
    absl::Status operator()(const std::string& label) const {
      return absl::InvalidArgumentError(
          "New dimensions cannot be specified by label");
    }
    absl::Status operator()(const DimRangeSpec& s) const {
      return NormalizeDimRangeSpec(s, new_rank, result);
    }
  };
  for (const auto& spec : dimensions) {
    TENSORSTORE_RETURN_IF_ERROR(std::visit(Visitor{new_rank, result}, spec));
  }
  return CheckAndNormalizeDimensions(new_rank, *result);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::DynamicDims;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpressionError;
// Invalid selections over existing dimensions must be rejected: more
// dimensions than the rank, an out-of-range index, and a duplicate index.
TEST(DimsTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(span<const DimensionIndex>({0, 0, 1})).IndexSlice(0),
      absl::StatusCode::kInvalidArgument,
      "Number of dimensions .* exceeds input rank .*");
  TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
                         Dims(2).Label("b"), absl::StatusCode::kInvalidArgument,
                         "Dimension index 2 is outside valid range .*");
  TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
                         Dims(1, 1).Label("b", "c"),
                         absl::StatusCode::kInvalidArgument,
                         "Input dimensions \\{1\\} specified more than once.*");
}
// Dimensions can be selected by label; unknown and empty labels are errors,
// and labels may be mixed with (negative) indices in one selection.
TEST(DimsTest, SelectUsingLabels) {
  TestDimExpression(
      IndexTransformBuilder<2, 0>()
          .input_labels({"x", "y"})
          .Finalize()
          .value(),
      Dims("x").Label("a"),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_labels({"a", "y"})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 0>().input_labels({"a", "y"}).Finalize().value(),
      {});
  // A label that matches no dimension is an error.
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().input_labels({"x", "y"}).Finalize().value(),
      Dims("a").Label("z"), absl::StatusCode::kInvalidArgument,
      "Label \"a\" does not match one of \\{\"x\", \"y\"\\}");
  // The empty label never selects a dimension, even if dimensions are
  // unlabeled.
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().input_labels({"", ""}).Finalize().value(),
      Dims("").Label("z"), absl::StatusCode::kInvalidArgument,
      "Dimension cannot be specified by empty label");
  // Mixed label + negative-index selection.
  TestDimExpression(
      IndexTransformBuilder<2, 0>()
          .input_labels({"x", "y"})
          .Finalize()
          .value(),
      Dims({"x", -1}).Label("a", "b"),
      {0, 1},
      IndexTransformBuilder<2, 2>()
          .input_labels({"a", "b"})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 0>().input_labels({"a", "b"}).Finalize().value(),
      {});
}
// Dynamic selections (DimRangeSpec + index + label) over existing dimensions,
// via both the variadic Dims() form and the DynamicDims() container form.
TEST(DynamicDimsTest, Existing) {
  const auto original_transform = IndexTransformBuilder<4, 0>()
                                      .input_labels({"a", "b", "c", "d"})
                                      .Finalize()
                                      .value();
  const auto expected_identity_new_transform =
      IndexTransformBuilder<4, 4>()
          .input_labels({"a1", "b1", "c1", "d1"})
          .output_identity_transform()
          .Finalize()
          .value();
  const auto expected_new_transform =
      IndexTransformBuilder<4, 0>()
          .input_labels({"a1", "b1", "c1", "d1"})
          .Finalize()
          .value();
  TestDimExpression(
      original_transform,
      Dims(DimRangeSpec{1, 4, 2}, 0, "c").Label("b1", "d1", "a1", "c1"),
      {1, 3, 0, 2},
      expected_identity_new_transform,
      expected_new_transform,
      {});
  TestDimExpression(
      original_transform,
      DynamicDims({DimRangeSpec{1, 4, 2}, 0, "c"})
          .Label("b1", "d1", "a1", "c1"),
      {1, 3, 0, 2},
      expected_identity_new_transform,
      expected_new_transform,
      {});
}
// A dynamic selection combining a range, a plain index, and a negative index
// may also designate *new* dimensions via AddNew(); indices then refer to the
// post-insertion rank (here 4 + 4 = 8).
TEST(DynamicDimsTest, CombinedNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 2}, 0, -1).AddNew().Label("e", "f", "g", "h"),
      {1, 3, 0, 7},
      IndexTransformBuilder<dynamic_rank, 4>(8, tensorstore::StaticRank<4>{})
          .input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
          .output_single_input_dimension(0, 2)
          .output_single_input_dimension(1, 4)
          .output_single_input_dimension(2, 5)
          .output_single_input_dimension(3, 6)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(8)
          .input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
          .Finalize()
          .value(),
      {},
      false);
}
// The following tests cover every rejection path of GetNumNewDimensions /
// GetNewDimensions: labels, unbounded or sign-inconsistent ranges, empty
// ranges, and a zero step.
TEST(DynamicDimsTest, InvalidNewLabel) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 2}, "x").AddNew(),
      absl::StatusCode::kInvalidArgument,
      "New dimensions cannot be specified by label");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewUnbounded) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, std::nullopt, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStop) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{5, std::nullopt, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`5:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStop) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, -3, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:-3` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStartNegativeStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-5, std::nullopt, -1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`-5::-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStart) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, 5, -1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:5:-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidInterval) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{6, 5, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`6:5` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidMixedSigns) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-1, 4, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`-1:4` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewZeroStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 0}).AddNew(), absl::StatusCode::kInvalidArgument,
      "step must not be 0");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidIntervalNegativeStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{5, 6, -1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`5:6:-1` is not a valid specification for new dimensions");
}
// The following tests cover every accepted DimRangeSpec form for *new*
// dimensions: negative step, negative indices, and implicit start/stop with
// either step sign.  In each case the expected transform pins down where the
// new dimensions are inserted in the post-insertion rank-6 domain.
TEST(DimsTest, DimRangeSpecNegativeStep) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-4, -7, -2}).AddNew().Label("e", "f"),
      {2, 0},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"f", "a", "e", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"f", "a", "e", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecNegativeIndicesNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-6, -3, 2}).AddNew().Label("e", "f"),
      {0, 2},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-3, std::nullopt, 2}).AddNew().Label("e", "f"),
      {3, 5},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"a", "b", "c", "e", "d", "f"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 2)
          .output_single_input_dimension(3, 4)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"a", "b", "c", "e", "d", "f"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNegativeStepNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, std::nullopt, -1}).AddNew().Label("e", "f"),
      {1, 0},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"f", "e", "a", "b", "c", "d"})
          .output_single_input_dimension(0, 2)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"f", "e", "a", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNegativeStepNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, -4, -2}).AddNew().Label("e", "f"),
      {5, 3},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"a", "b", "c", "f", "d", "e"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 2)
          .output_single_input_dimension(3, 4)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"a", "b", "c", "f", "d", "e"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, 3, 2}).AddNew().Label("e", "f"),
      {0, 2},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
// Resolve() materializes a dimension selection against a domain without
// applying any operation, filling a DimensionIndexBuffer.
TEST(ResolveTest, Example) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto domain, IndexDomainBuilder<3>().labels({"x", "y", "z"}).Finalize());
  DimensionIndexBuffer buffer;
  TENSORSTORE_EXPECT_OK(Dims("x", "z").Resolve(domain, &buffer));
  EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/dimension_selection.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_selection_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e623c8f3-9336-4f63-a160-ff9b2d0ae2db | cpp | google/tensorstore | label_op | tensorstore/index_space/internal/label_op.cc | tensorstore/index_space/label_op_test.cc | #include "tensorstore/index_space/internal/label_op.h"
#include <stddef.h>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyLabel(IndexTransform<> transform,
                                    DimensionIndexBuffer* dimensions,
                                    internal::StringLikeSpan labels,
                                    bool domain_only) {
  // Exactly one label must be supplied per selected dimension.
  const size_t num_selected = dimensions->size();
  if (num_selected != static_cast<size_t>(labels.size())) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of dimensions (", dimensions->size(),
        ") does not match number of labels (", labels.size(), ")."));
  }
  // Obtain a uniquely-owned representation so labels can be written in place.
  auto rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  span<std::string> input_labels =
      rep->input_labels().first(rep->input_rank);
  for (size_t i = 0; i < num_selected; ++i) {
    const std::string_view label = labels[i];
    input_labels[(*dimensions)[i]].assign(label.begin(), label.end());
  }
  // Relabeling must not introduce duplicate labels.
  TENSORSTORE_RETURN_IF_ERROR(
      internal::ValidateDimensionLabelsAreUnique(input_labels));
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IdentityTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpression;
// Label() replaces the labels of the selected dimensions (by index or by
// existing label) while leaving the domain itself unchanged.
TEST(LabelTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform = IndexTransformBuilder<3, 3>()
                                          .input_origin({1, 2, 3})
                                          .input_shape({3, 4, 2})
                                          .input_labels({"a", "y", "b"})
                                          .output_identity_transform()
                                          .Finalize()
                                          .value();
  TestDimExpression(original_transform,
                    Dims(0, 2).Label("a", "b"),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    {});
  // Selecting by label yields the same result as selecting by index.
  TestDimExpression(original_transform,
                    Dims("x", "z").Label("a", "b"),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    {});
}
// Labels are assigned in selection order, not dimension order.
TEST(LabelTest, MultipleArguments) {
  TestDimExpression(
      IndexTransformBuilder<3, 1>()
          .output_constant(0, 1)
          .Finalize()
          .value(),
      Dims(1, 0).Label("x", "y"),
      {1, 0},
      IndexTransformBuilder<3, 3>()
          .input_labels({"y", "x", ""})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<3, 1>()
          .input_labels({"y", "x", ""})
          .output_constant(0, 1)
          .Finalize()
          .value(),
      {});
}
// The label count must match the selection count (ApplyLabel's size check).
TEST(LabelTest, ErrorHandling) {
  TestDimExpressionError(
      IdentityTransform(1),
      Dims(span<const DimensionIndex>({0})).Label("x", "y"),
      absl::StatusCode::kInvalidArgument,
      "Number of dimensions \\(1\\) does not match number of "
      "labels \\(2\\)\\.");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/label_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/label_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
fc22fcd4-0c88-46a4-b421-8053fcf0fff6 | cpp | google/tensorstore | compose_transforms | tensorstore/index_space/internal/compose_transforms.cc | tensorstore/index_space/compose_transforms_test.cc | #include "tensorstore/index_space/internal/compose_transforms.h"
#include <cassert>
#include <sstream>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_replace.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_array.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Returns true if every dimension of `layout` that the index array actually
// varies over (non-zero byte stride) has extent 1, i.e. the array holds a
// single effective element.
bool IsSingletonIndexArrayMap(StridedLayoutView<> layout) {
  const DimensionIndex rank = layout.rank();
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    if (layout.byte_strides()[dim] != 0 && layout.shape()[dim] != 1) {
      return false;
    }
  }
  return true;
}
// Composes `a_to_b` with `b_to_c`, writing the result into `a_to_c`.
// `can_move_from_*` permit destructive reuse of the corresponding input rep
// (e.g. moving its label strings).  When `domain_only` is true only the input
// domain of the composition is produced (output rank 0).
absl::Status ComposeTransformsImpl(TransformRep* b_to_c,
                                   bool can_move_from_b_to_c,
                                   TransformRep* a_to_b,
                                   bool can_move_from_a_to_b,
                                   TransformRep* a_to_c, bool domain_only) {
  assert(b_to_c != nullptr && a_to_b != nullptr && a_to_c != nullptr);
  const DimensionIndex a_to_c_output_rank =
      domain_only ? 0 : b_to_c->output_rank;
  assert(a_to_c_output_rank <= a_to_c->output_rank_capacity &&
         a_to_b->output_rank == b_to_c->input_rank &&
         a_to_b->input_rank <= a_to_c->input_rank_capacity);
  // `a_to_c` must be distinct from the inputs: it is written in place below.
  assert(a_to_c != b_to_c && a_to_c != a_to_b);
  const DimensionIndex a_rank = a_to_b->input_rank;
  const DimensionIndex b_rank = a_to_b->output_rank;
  const DimensionIndex c_rank = b_to_c->output_rank;
  a_to_c->input_rank = a_rank;
  a_to_c->output_rank = a_to_c_output_rank;
  // The composed transform keeps the input labels of `a_to_b`.
  CopyInputLabels(a_to_b, a_to_c,
                  can_move_from_a_to_b);
  BoxView<> b_to_c_domain = b_to_c->input_domain(b_rank);
  MutableBoxView<> a_to_c_domain = a_to_c->input_domain(a_rank);
  // Propagate the bounds of `b_to_c`'s domain back through `a_to_b` to obtain
  // the composed domain (and its implicit-bound flags).
  TENSORSTORE_RETURN_IF_ERROR(PropagateBounds(
      b_to_c_domain, b_to_c->implicit_lower_bounds,
      b_to_c->implicit_upper_bounds, a_to_b, a_to_c_domain,
      a_to_c->implicit_lower_bounds, a_to_c->implicit_upper_bounds));
  if (domain_only) {
    internal_index_space::DebugCheckInvariants(a_to_c);
    return absl::OkStatus();
  }
  span<const OutputIndexMap> b_to_c_output_index_maps =
      b_to_c->output_index_maps().first(c_rank);
  span<const OutputIndexMap> a_to_b_output_index_maps =
      a_to_b->output_index_maps().first(b_rank);
  span<OutputIndexMap> a_to_c_output_index_maps =
      a_to_c->output_index_maps().first(c_rank);
  const bool a_to_c_domain_is_explicitly_empty =
      IsDomainExplicitlyEmpty(a_to_c);
  // Compose the output index map for each output dimension of `b_to_c`.
  for (DimensionIndex c_dim = 0; c_dim < c_rank; ++c_dim) {
    auto& b_to_c_map = b_to_c_output_index_maps[c_dim];
    auto& a_to_c_map = a_to_c_output_index_maps[c_dim];
    // A zero stride makes any map effectively constant.
    const OutputIndexMethod b_to_c_method = b_to_c_map.stride() == 0
                                                ? OutputIndexMethod::constant
                                                : b_to_c_map.method();
    switch (b_to_c_method) {
      case OutputIndexMethod::constant: {
        // constant ∘ anything = the same constant.
        a_to_c_map.SetConstant();
        a_to_c_map.stride() = 0;
        a_to_c_map.offset() = b_to_c_map.offset();
        break;
      }
      case OutputIndexMethod::single_input_dimension: {
        // Substitute `a_to_b`'s map for dimension `b_dim` into the affine map
        // offset + stride * x of `b_to_c`.
        const DimensionIndex b_dim = b_to_c_map.input_dimension();
        assert(b_dim >= 0 && b_dim < b_rank);
        auto& a_to_b_map = a_to_b_output_index_maps[b_dim];
        const OutputIndexMethod a_to_b_method =
            a_to_b_map.stride() == 0 ? OutputIndexMethod::constant
                                     : a_to_b_map.method();
        // Composed offset: b_offset + b_stride * a_offset (overflow-checked).
        Index new_output_offset;
        if (internal::MulOverflow(a_to_b_map.offset(), b_to_c_map.stride(),
                                  &new_output_offset) ||
            internal::AddOverflow(b_to_c_map.offset(), new_output_offset,
                                  &a_to_c_map.offset())) {
          return absl::InvalidArgumentError(
              tensorstore::StrCat("Integer overflow computing output "
                                  "offset for output dimension ",
                                  c_dim, "."));
        }
        if (a_to_b_method == OutputIndexMethod::constant) {
          // affine ∘ constant = constant (offset already computed above).
          a_to_c_map.SetConstant();
          a_to_c_map.stride() = 0;
          break;
        }
        // Composed stride: a_stride * b_stride (overflow-checked).
        if (internal::MulOverflow(a_to_b_map.stride(), b_to_c_map.stride(),
                                  &a_to_c_map.stride())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing output_strides[", c_dim,
              "] = ", a_to_b_map.stride(), " * ", b_to_c_map.stride(), "."));
        }
        if (a_to_b_method == OutputIndexMethod::single_input_dimension) {
          // affine ∘ affine = affine on the same input dimension of `a`.
          const DimensionIndex a_dim = a_to_b_map.input_dimension();
          assert(a_dim >= 0 && a_dim < a_rank);
          a_to_c_map.SetSingleInputDimension(a_dim);
          break;
        }
        // affine ∘ index array: reuse `a_to_b`'s index array, tightening its
        // index_range by the bounds implied by `b_to_c`'s domain.
        assert(a_to_b_method == OutputIndexMethod::array);
        if (a_to_c_domain_is_explicitly_empty) {
          // No elements: an arbitrary constant map is sufficient (and avoids
          // retaining the index array).
          a_to_c_map.SetConstant();
          a_to_c_map.offset() = 0;
          a_to_c_map.stride() = 0;
          break;
        }
        const auto& a_to_b_index_array_data = a_to_b_map.index_array_data();
        IndexInterval index_range;
        {
          TENSORSTORE_ASSIGN_OR_RETURN(
              const IndexInterval propagated_bounds,
              GetAffineTransformDomain(
                  OptionallyImplicitIndexInterval{
                      b_to_c_domain[b_dim],
                      b_to_c->implicit_lower_bounds[b_dim],
                      b_to_c->implicit_upper_bounds[b_dim]}
                      .effective_interval(),
                  a_to_b_map.offset(), a_to_b_map.stride()));
          index_range =
              Intersect(a_to_b_index_array_data.index_range, propagated_bounds);
        }
        if (IsSingletonIndexArrayMap(
                StridedLayoutView<>(a_rank, a_to_c_domain.shape().data(),
                                    a_to_b_index_array_data.byte_strides))) {
          // The index array holds a single effective value: collapse it to a
          // constant map (validating the value against `index_range`).
          a_to_c_map.SetConstant();
          TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
              *a_to_b_index_array_data.array_view(a_to_b->input_domain(a_rank))
                   .byte_strided_origin_pointer(),
              index_range, &a_to_c_map.offset(), &a_to_c_map.stride()));
        } else {
          auto& index_array =
              a_to_c_map.SetArrayIndexing(a_rank, a_to_b_index_array_data);
          index_array.index_range = index_range;
        }
        break;
      }
      case OutputIndexMethod::array: {
        // index array ∘ transform: transform the index array itself through
        // `a_to_b` over the composed domain.
        auto& a_to_c_map = a_to_c_output_index_maps[c_dim];
        if (a_to_c_domain_is_explicitly_empty) {
          a_to_c_map.SetConstant();
          a_to_c_map.offset() = 0;
          a_to_c_map.stride() = 0;
          break;
        }
        auto& index_array_data = b_to_c_map.index_array_data();
        auto& result_array_data = a_to_c_map.SetArrayIndexing(a_rank);
        result_array_data.index_range = index_array_data.index_range;
        TENSORSTORE_ASSIGN_OR_RETURN(
            auto transformed_element_pointer,
            TransformArraySubRegion(
                index_array_data.shared_array_view(b_to_c_domain), a_to_b,
                a_to_c_domain.origin().data(), a_to_c_domain.shape().data(),
                result_array_data.byte_strides,
                {skip_repeated_elements}));
        auto new_index_array_origin_pointer =
            StaticDataTypeCast<const Index, unchecked>(
                std::move(transformed_element_pointer));
        // Re-base the element pointer so that indexing with absolute input
        // coordinates (origin included) is valid.
        result_array_data.element_pointer = AddByteOffset(
            new_index_array_origin_pointer,
            -IndexInnerProduct(a_rank, result_array_data.byte_strides,
                               a_to_c_domain.origin().data()));
        Index output_offset = b_to_c_map.offset();
        Index output_stride = b_to_c_map.stride();
        if (IsSingletonIndexArrayMap(
                StridedLayoutView<>(a_rank, a_to_c_domain.shape().data(),
                                    result_array_data.byte_strides))) {
          // The transformed array degenerated to a single value: fold it into
          // offset/stride and emit a constant map instead.
          TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
              *new_index_array_origin_pointer.data(),
              result_array_data.index_range, &output_offset, &output_stride));
          a_to_c_map.SetConstant();
        }
        a_to_c_map.offset() = output_offset;
        a_to_c_map.stride() = output_stride;
        break;
      }
    }
  }
  internal_index_space::DebugCheckInvariants(a_to_c);
  return absl::OkStatus();
}
}
// Rep-level composition: allocates the result rep, delegates to
// ComposeTransformsImpl, and on failure attaches the participating transforms
// as string payloads on the error status for easier diagnosis.
Result<TransformRep::Ptr<>> ComposeTransforms(TransformRep* b_to_c,
                                              bool can_move_from_b_to_c,
                                              TransformRep* a_to_b,
                                              bool can_move_from_a_to_b,
                                              bool domain_only) {
  assert(b_to_c);
  assert(a_to_b);
  const DimensionIndex a_rank = a_to_b->input_rank;
  const DimensionIndex b_rank = a_to_b->output_rank;
  const DimensionIndex c_rank = b_to_c->output_rank;
  absl::Status status;
  // The output rank of `a_to_b` must equal the input rank of `b_to_c`.
  if (b_rank == b_to_c->input_rank) {
    auto data = TransformRep::Allocate(a_rank, domain_only ? 0 : c_rank);
    status =
        ComposeTransformsImpl(b_to_c, can_move_from_b_to_c, a_to_b,
                              can_move_from_a_to_b, data.get(), domain_only);
    if (status.ok()) {
      return data;
    }
  } else {
    status = absl::InvalidArgumentError(
        tensorstore::StrCat("Rank ", b_to_c->input_rank, " -> ", c_rank,
                            " transform cannot be composed with rank ", a_rank,
                            " -> ", b_rank, " transform."));
  }
  assert(!status.ok());
  // Renders a transform as a single-line string for a status payload.
  auto format_transform = [](TransformRep* rep) {
    std::ostringstream os;
    internal_index_space::PrintToOstream(os, rep);
    std::string str = os.str();
    absl::StrReplaceAll({{"\n", " "}}, &str);
    return absl::Cord(str);
  };
  AddStatusPayload(status, "transform", format_transform(a_to_b));
  // Only attach `b_to_c` if a "domain" payload was not already attached
  // further down the call chain (avoids redundant context).
  if (!status.GetPayload("domain").has_value()) {
    AddStatusPayload(status, "left_transform", format_transform(b_to_c));
  }
  return status;
}
Result<IndexTransform<dynamic_rank, dynamic_rank, container>> ComposeTransforms(
    IndexTransform<dynamic_rank, dynamic_rank, container> b_to_c,
    IndexTransform<dynamic_rank, dynamic_rank, container> a_to_b,
    bool domain_only) {
  // Both transforms are taken by value; when this function holds the only
  // reference to a representation, composition may consume it in place.
  TransformRep* b_to_c_rep = TransformAccess::rep(b_to_c);
  TransformRep* a_to_b_rep = TransformAccess::rep(a_to_b);
  auto a_to_c_rep = internal_index_space::ComposeTransforms(
      b_to_c_rep, /*can_move_from_b_to_c=*/b_to_c_rep->is_unique(),
      a_to_b_rep, /*can_move_from_a_to_b=*/a_to_b_rep->is_unique(),
      domain_only);
  if (!a_to_c_rep.ok()) return a_to_c_rep.status();
  return TransformAccess::Make<IndexTransform<>>(std::move(*a_to_c_rep));
}
}
} | #include "tensorstore/index_space/internal/compose_transforms.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
// Composing through a transform with an empty domain succeeds and yields an
// empty-domain result (no index-array evaluation is attempted).
TEST(ComposeTransformsTest, EmptyDomain) {
  auto b_to_c = IndexTransformBuilder<3, 2>()
                    .input_origin({1, 2, 3})
                    .input_shape({5, 6, 7})
                    .output_identity_transform()
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<2, 3>()
                    .input_origin({1, 2})
                    .input_shape({5, 0})
                    .output_identity_transform()
                    .output_constant(2, 5)
                    .Finalize()
                    .value();
  auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
  auto expected_a_to_c = IndexTransformBuilder<2, 2>()
                             .input_origin({1, 2})
                             .input_shape({5, 0})
                             .output_identity_transform()
                             .Finalize()
                             .value();
  EXPECT_EQ(expected_a_to_c, a_to_c);
}
// Index-array composition paths: out-of-range index_range propagation, and
// the singleton-array optimization that collapses a one-element index array
// (on either side of the composition) to a constant map — including the case
// where the single value violates its index_range.
TEST(ComposeTransformsTest, TransformArrayError) {
  auto b_to_c = IndexTransformBuilder<1, 1>()
                    .input_origin({0})
                    .input_shape({2})
                    .output_index_array(0, 0, 1, MakeArray<Index>({1, 2}))
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<1, 1>()
                    .input_origin({1})
                    .input_shape({1})
                    .output_index_array(0, 0, 1, MakeArray<Index>({1}),
                                        IndexInterval::Closed(4, 6))
                    .Finalize()
                    .value();
  EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
              MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ComposeTransformsTest, BtoCIndexArrayWithSingleIndex) {
  auto b_to_c = IndexTransformBuilder<1, 1>()
                    .input_origin({0})
                    .input_shape({2})
                    .output_index_array(0, 0, 1, MakeArray<Index>({7, 8}))
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<1, 1>()
                    .input_origin({1})
                    .input_shape({1})
                    .output_identity_transform()
                    .Finalize()
                    .value();
  auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
  auto expected_a_to_c = IndexTransformBuilder<1, 1>()
                             .input_origin({1})
                             .input_shape({1})
                             .output_constant(0, 8)
                             .Finalize()
                             .value();
  EXPECT_EQ(expected_a_to_c, a_to_c);
}
TEST(ComposeTransformsTest, BtoCIndexArrayWithInvalidSingleIndex) {
  auto b_to_c = IndexTransformBuilder<1, 1>()
                    .input_origin({0})
                    .input_shape({2})
                    .output_index_array(0, 0, 1, MakeArray<Index>({7, 8}),
                                        IndexInterval::Closed(2, 3))
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<1, 1>()
                    .input_origin({1})
                    .input_shape({1})
                    .output_identity_transform()
                    .Finalize()
                    .value();
  EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
              MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ComposeTransformsTest, AtoBIndexArrayWithSingleIndex) {
  auto b_to_c = IndexTransformBuilder<1, 1>()
                    .output_identity_transform()
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<1, 1>()
                    .input_origin({0})
                    .input_shape({1})
                    .output_index_array(0, 0, 1, MakeArray<Index>({7}))
                    .Finalize()
                    .value();
  auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
  auto expected_a_to_c = IndexTransformBuilder<1, 1>()
                             .input_origin({0})
                             .input_shape({1})
                             .output_constant(0, 7)
                             .Finalize()
                             .value();
  EXPECT_EQ(expected_a_to_c, a_to_c);
}
TEST(ComposeTransformsTest, AtoBIndexArrayWithInvalidSingleIndex) {
  auto b_to_c = IndexTransformBuilder<1, 1>()
                    .output_identity_transform()
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<1, 1>()
                    .input_origin({0})
                    .input_shape({1})
                    .output_index_array(0, 0, 1, MakeArray<Index>({7}),
                                        IndexInterval::Closed(2, 3))
                    .Finalize()
                    .value();
  EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
              MatchesStatus(absl::StatusCode::kOutOfRange));
}
// Bounds and integer-overflow checks in the affine composition path:
// out-of-domain constants, and the overflow-checked offset/stride products.
TEST(ComposeTransformsTest, ConstantOutOfDomain) {
  auto b_to_c = IndexTransformBuilder<3, 2>()
                    .input_origin({1, 2, 3})
                    .input_shape({5, 6, 7})
                    .output_identity_transform()
                    .Finalize()
                    .value();
  auto a_to_b = IndexTransformBuilder<2, 3>()
                    .input_origin({1, 2})
                    .input_shape({5, 4})
                    .output_identity_transform()
                    .output_constant(2, 2)
                    .Finalize()
                    .value();
  EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b).status(),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            ".*Index 2 is outside valid range \\[3, 10\\)"));
}
TEST(ComposeTransformsTest, ConstantOverflow) {
  // b_stride * a_offset overflows.
  EXPECT_THAT(ComposeTransforms(IndexTransformBuilder<1, 1>()
                                    .output_single_input_dimension(0, 0, 100, 0)
                                    .Finalize()
                                    .value(),
                                IndexTransformBuilder<0, 1>()
                                    .output_constant(0, kMaxFiniteIndex)
                                    .Finalize()
                                    .value())
                  .status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  // b_offset + b_stride * a_offset overflows.
  EXPECT_THAT(
      ComposeTransforms(IndexTransformBuilder<1, 1>()
                            .output_single_input_dimension(
                                0, std::numeric_limits<Index>::max(), 1, 0)
                            .Finalize()
                            .value(),
                        IndexTransformBuilder<0, 1>()
                            .output_constant(0, 100)
                            .Finalize()
                            .value())
          .status(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComposeTransformsTest, SingleInputDimensionOverflow) {
  EXPECT_THAT(
      ComposeTransforms(IndexTransformBuilder<1, 1>()
                            .output_single_input_dimension(
                                0, std::numeric_limits<Index>::max(), 1, 0)
                            .Finalize()
                            .value(),
                        IndexTransformBuilder<1, 1>()
                            .input_origin({0})
                            .input_shape({10})
                            .output_single_input_dimension(0, 100, 1, 0)
                            .Finalize()
                            .value())
          .status(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(ComposeTransforms(
                  IndexTransformBuilder<1, 1>()
                      .output_single_input_dimension(0, 0, 100, 0)
                      .Finalize()
                      .value(),
                  IndexTransformBuilder<1, 1>()
                      .input_origin({0})
                      .input_shape({10})
                      .output_single_input_dimension(0, kMaxFiniteIndex, 1, 0)
                      .Finalize()
                      .value())
                  .status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  // a_stride * b_stride overflows.
  EXPECT_THAT(
      ComposeTransforms(IndexTransformBuilder<1, 1>()
                            .output_single_input_dimension(0, 0, 100, 0)
                            .Finalize()
                            .value(),
                        IndexTransformBuilder<1, 1>()
                            .input_origin({0})
                            .input_shape({10})
                            .output_single_input_dimension(
                                0, 0, std::numeric_limits<Index>::max() - 1, 0)
                            .Finalize()
                            .value())
          .status(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Remaining composition behaviors: index-range overflow, rank mismatch,
// operator() sugar, rank-0, implicit-bound relaxation, and preservation of
// the skip-repeated-elements (zero byte stride) index-array layout.
TEST(ComposeTransformsTest, IndexArrayBoundsOverflow) {
  EXPECT_THAT(ComposeTransforms(
                  IndexTransformBuilder<1, 1>()
                      .input_origin({2})
                      .input_shape({100})
                      .output_identity_transform()
                      .Finalize()
                      .value(),
                  IndexTransformBuilder<1, 1>()
                      .input_origin({0})
                      .input_shape({2})
                      .output_index_array(0, std::numeric_limits<Index>::min(),
                                          1, MakeArray<Index>({1, 2}),
                                          IndexInterval::Closed(0, 100))
                      .Finalize()
                      .value())
                  .status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComposeTransformsTest, RankMismatch) {
  EXPECT_THAT(
      ComposeTransforms(IdentityTransform(2), IdentityTransform(3)).status(),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Rank 2 -> 2 transform cannot be composed with rank 3 -> 3 "
                    "transform\\."));
}
// t0(t1) is equivalent to ComposeTransforms(t1, t0).
TEST(ComposeTransformsTest, FunctionCallOperator) {
  const auto t0 = IndexTransformBuilder<1, 1>()
                      .input_origin({0})
                      .input_shape({3})
                      .output_single_input_dimension(0, 10, 1, 0)
                      .Finalize()
                      .value();
  const auto t1 = IndexTransformBuilder<1, 1>()
                      .input_origin({10})
                      .input_shape({5})
                      .output_single_input_dimension(0, 20, 1, 0)
                      .Finalize()
                      .value();
  const auto expected_composed = IndexTransformBuilder<1, 1>()
                                     .input_origin({0})
                                     .input_shape({3})
                                     .output_single_input_dimension(0, 30, 1, 0)
                                     .Finalize()
                                     .value();
  const auto composed = t0(t1).value();
  EXPECT_EQ(expected_composed, composed);
  EXPECT_EQ(expected_composed, ComposeTransforms(t1, t0).value());
}
TEST(ComposeTransformsTest, RankZero) {
  auto t0 = IdentityTransform(0);
  EXPECT_EQ(t0, ComposeTransforms(t0, t0).value());
}
// An implicit bound of the outer transform does not constrain the inner
// transform's explicit domain.
TEST(ComposeTransformsTest, ImplicitOutOfBounds) {
  const auto t0 = IndexTransformBuilder<1, 1>()
                      .input_origin({0})
                      .input_shape({4})
                      .implicit_lower_bounds({1})
                      .output_identity_transform()
                      .Finalize()
                      .value();
  const auto t1 = IndexTransformBuilder<1, 1>()
                      .input_origin({-1})
                      .input_exclusive_max({2})
                      .output_identity_transform()
                      .Finalize()
                      .value();
  EXPECT_THAT(ComposeTransforms(t0, t1), ::testing::Optional(t1));
}
TEST(ComposeTransformsTest, TransformIndexArraySkipRepeatedElements) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t0, IndexTransformBuilder(2, 2)
                   .input_shape({5, 2})
                   .output_index_array(
                       0, 0, 1, MakeArray<Index>({{0}, {1}, {2}, {3}, {4}}))
                   .output_single_input_dimension(1, 1)
                   .Finalize());
  EXPECT_THAT(t0.output_index_maps()[0].index_array().byte_strides(),
              ::testing::ElementsAre(8, 0));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto t1, ComposeTransforms(t0, t0));
  EXPECT_EQ(t0, t1);
  EXPECT_THAT(t1.output_index_maps()[0].index_array().byte_strides(),
              ::testing::ElementsAre(8, 0));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/compose_transforms.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/compose_transforms_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d28dc637-0d34-4545-8a45-35b6ed5a752c | cpp | google/tensorstore | propagate_bounds | tensorstore/index_space/internal/propagate_bounds.cc | tensorstore/index_space/propagate_bounds_test.cc | #include "tensorstore/index_space/internal/propagate_bounds.h"
#include <algorithm>
#include <cassert>
#include <sstream>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_replace.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Computes the input-space domain `a` implied by the output-space domain
// `b` under the transform `a_to_b`.
//
// A null `a_to_b` is treated as an identity transform: `b` is copied
// directly to `a` (the ranks must already match).
//
// For each output dimension with a `single_input_dimension` map, the affine
// map is inverted to obtain a constraint on the corresponding input
// dimension; constraints from multiple output dimensions targeting the same
// input dimension are intersected, with explicit bounds taking precedence
// over implicit ones.  Constant output maps are only validated against `b`;
// index array output maps impose no constraint here.
//
// \param b Output-space domain.
// \param b_implicit_lower_bounds Implicit-lower-bound mask for `b`.
// \param b_implicit_upper_bounds Implicit-upper-bound mask for `b`.
// \param a_to_b May be null.  Not modified.
// \param a[out] Set to the propagated input-space domain.
absl::Status PropagateBoundsImpl(BoxView<> b,
                                 DimensionSet b_implicit_lower_bounds,
                                 DimensionSet b_implicit_upper_bounds,
                                 TransformRep* a_to_b, MutableBoxView<> a) {
  if (!a_to_b) {
    // Null transform is treated as identity: just copy the bounds through.
    assert(a.rank() == b.rank());
    a.DeepAssign(b);
    return absl::OkStatus();
  }
  assert(a_to_b->input_rank == a.rank());
  assert(a_to_b->output_rank == b.rank());
  // Reset `a` to default-constructed intervals; constraints derived from
  // `b` are intersected into it below.
  a.Fill();
  span<const OutputIndexMap> maps = a_to_b->output_index_maps().first(b.rank());
  // Input dimensions that received at least one propagated constraint.
  DimensionSet propagated_to_a;
  // Implicit-bound state of the constraints accumulated so far.  Starts
  // fully implicit so that any real (explicit) constraint takes precedence
  // in `IntersectPreferringExplicit` below.
  DimensionSet inferred_implicit_lower_bounds(true);
  DimensionSet inferred_implicit_upper_bounds(true);
  auto& implicit_lower_bounds = a_to_b->implicit_lower_bounds;
  auto& implicit_upper_bounds = a_to_b->implicit_upper_bounds;
  const auto existing_input_domain = a_to_b->input_domain(a.rank());
  // If any input dimension with two explicit bounds is empty, the transform
  // maps no points, so bounds violations below are not reported as errors.
  bool is_domain_empty = false;
  for (DimensionIndex a_dim = 0; a_dim < a.rank(); ++a_dim) {
    if (!implicit_lower_bounds[a_dim] && !implicit_upper_bounds[a_dim] &&
        existing_input_domain[a_dim].empty()) {
      is_domain_empty = true;
      break;
    }
  }
  for (DimensionIndex b_dim = 0; b_dim < b.rank(); ++b_dim) {
    auto& map = maps[b_dim];
    const Index output_stride = map.stride();
    // Index array maps do not constrain the input domain here.
    if (map.method() == OutputIndexMethod::array) continue;
    OptionallyImplicitIndexInterval b_bounds_oi{b[b_dim],
                                                b_implicit_lower_bounds[b_dim],
                                                b_implicit_upper_bounds[b_dim]};
    if (output_stride == 0 || map.method() == OutputIndexMethod::constant) {
      // A constant map (or zero stride) only needs its fixed offset to lie
      // within the effective bounds of `b[b_dim]`.
      if (!is_domain_empty) {
        TENSORSTORE_RETURN_IF_ERROR(
            CheckContains(b_bounds_oi.effective_interval(), map.offset()),
            MaybeAnnotateStatus(
                _, tensorstore::StrCat("Checking bounds of constant output "
                                       "index map for dimension ",
                                       b_dim)));
      }
      continue;
    }
    const DimensionIndex a_dim = map.input_dimension();
    assert(a_dim >= 0 && a_dim < a.rank());
    // Invert the affine map `output = offset + stride * input` to obtain
    // the input interval corresponding to `b[b_dim]`.
    TENSORSTORE_ASSIGN_OR_RETURN(
        OptionallyImplicitIndexInterval propagated_a_bounds,
        GetAffineTransformDomain(b_bounds_oi, map.offset(), map.stride()),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Propagating bounds from dimension ", b_dim,
                                   " to input dimension ", a_dim)));
    // Merge with any constraint previously propagated to this input
    // dimension; explicit bounds win over implicit ones.
    propagated_a_bounds = IntersectPreferringExplicit(
        propagated_a_bounds,
        OptionallyImplicitIndexInterval{a[a_dim],
                                        inferred_implicit_lower_bounds[a_dim],
                                        inferred_implicit_upper_bounds[a_dim]});
    a[a_dim] = propagated_a_bounds.interval();
    inferred_implicit_lower_bounds[a_dim] =
        propagated_a_bounds.implicit_lower();
    inferred_implicit_upper_bounds[a_dim] =
        propagated_a_bounds.implicit_upper();
    propagated_to_a[a_dim] = true;
  }
  // Combine the inferred bounds with the transform's existing input domain
  // and verify that the combination is compatible.
  for (DimensionIndex a_dim = 0; a_dim < a.rank(); ++a_dim) {
    IndexInterval existing = existing_input_domain[a_dim];
    IndexIntervalRef inferred = a[a_dim];
    if (!propagated_to_a[a_dim]) {
      // No constraint reached this dimension; keep the existing bounds.
      inferred = existing;
      continue;
    }
    // Take the inferred bound where the existing bound is implicit, and the
    // existing bound where it is explicit.
    const Index inclusive_min = implicit_lower_bounds[a_dim]
                                    ? inferred.inclusive_min()
                                    : existing.inclusive_min();
    // Clamp the upper bound so the combined interval is at worst empty,
    // never inverted.
    const Index inclusive_max =
        std::max(inclusive_min - 1, implicit_upper_bounds[a_dim]
                                        ? inferred.inclusive_max()
                                        : existing.inclusive_max());
    const IndexInterval combined =
        IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
    const OptionallyImplicitIndexInterval inferred_oi{
        inferred, inferred_implicit_lower_bounds[a_dim],
        inferred_implicit_upper_bounds[a_dim]};
    if (!is_domain_empty &&
        !Contains(inferred_oi.effective_interval(), combined)) {
      // Build a detailed error; sizes are included only when finite.
      std::ostringstream os;
      os << "Propagated bounds " << inferred_oi;
      if (inferred_oi.size() != kInfSize) {
        os << ", with size=" << inferred_oi.size() << ", ";
      }
      os << "for dimension " << a_dim
         << " are incompatible with existing bounds " << combined;
      if (combined.size() != kInfSize) {
        os << ", with size=" << combined.size();
      }
      os << ".";
      return absl::OutOfRangeError(os.str());
    }
    inferred = combined;
  }
  return absl::OkStatus();
}
// Computes the implicit-bound masks of the input space corresponding to the
// output-space masks under `a_to_b`.
//
// With a null `a_to_b` (identity), the masks are copied unchanged.
// Otherwise the transform's own masks are the starting point, and an
// explicit bound of an output dimension also makes the corresponding bound
// of the input dimension it is mapped from (via a `single_input_dimension`
// map with non-zero stride) explicit; a negative stride swaps the
// lower/upper roles.
void PropagateImplicitBoundState(DimensionIndex b_rank,
                                 DimensionSet b_implicit_lower_bounds,
                                 DimensionSet b_implicit_upper_bounds,
                                 TransformRep* a_to_b, DimensionIndex a_rank,
                                 DimensionSet& a_implicit_lower_bounds,
                                 DimensionSet& a_implicit_upper_bounds) {
  if (!a_to_b) {
    // Identity: masks pass through unchanged.
    a_implicit_lower_bounds = b_implicit_lower_bounds;
    a_implicit_upper_bounds = b_implicit_upper_bounds;
    return;
  }
  a_implicit_lower_bounds = a_to_b->implicit_lower_bounds;
  a_implicit_upper_bounds = a_to_b->implicit_upper_bounds;
  span<const OutputIndexMap> maps = a_to_b->output_index_maps().first(b_rank);
  for (DimensionIndex b_dim = 0; b_dim < b_rank; ++b_dim) {
    auto& map = maps[b_dim];
    // Only invertible single_input_dimension maps propagate explicitness.
    if (map.method() != OutputIndexMethod::single_input_dimension ||
        map.stride() == 0) {
      continue;
    }
    const DimensionIndex a_dim = map.input_dimension();
    assert(a_dim >= 0 && a_dim < a_rank);
    bool implicit_lower = b_implicit_lower_bounds[b_dim];
    bool implicit_upper = b_implicit_upper_bounds[b_dim];
    if (map.stride() < 0) {
      // A negative stride reverses orientation: the output's lower bound
      // constrains the input's upper bound and vice versa.
      std::swap(implicit_lower, implicit_upper);
    }
    // An explicit output bound forces the corresponding input bound to be
    // explicit; implicit output bounds leave the input state alone.
    if (!implicit_lower) a_implicit_lower_bounds[a_dim] = false;
    if (!implicit_upper) a_implicit_upper_bounds[a_dim] = false;
  }
}
}
// Propagates the bounds of `b` through `a_to_b` into `a`.  On failure the
// returned status is augmented with "transform" and "domain" payloads that
// describe the inputs, to aid debugging.
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
                             DimensionSet b_implicit_upper_bounds,
                             TransformRep* a_to_b, MutableBoxView<> a) {
  absl::Status result = PropagateBoundsImpl(b, b_implicit_lower_bounds,
                                            b_implicit_upper_bounds, a_to_b, a);
  if (result.ok()) return result;
  // Render the transform on a single line and attach it, along with the
  // output-space domain, as status payloads.
  std::ostringstream transform_stream;
  internal_index_space::PrintToOstream(transform_stream, a_to_b);
  std::string transform_str = transform_stream.str();
  absl::StrReplaceAll({{"\n", " "}}, &transform_str);
  AddStatusPayload(result, "transform", absl::Cord(transform_str));
  AddStatusPayload(result, "domain", absl::Cord(tensorstore::StrCat(b)));
  return result;
}
// Convenience overload: every bound of `b` is treated as explicit.
absl::Status PropagateExplicitBounds(BoxView<> b, TransformRep* a_to_b,
                                     MutableBoxView<> a) {
  const DimensionSet all_explicit(false);
  return PropagateBounds(b, all_explicit, all_explicit, a_to_b, a);
}
// Overload that additionally reports the implicit-bound masks of the
// propagated input domain via `a_implicit_lower_bounds` /
// `a_implicit_upper_bounds`.
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
                             DimensionSet b_implicit_upper_bounds,
                             TransformRep* a_to_b, MutableBoxView<> a,
                             DimensionSet& a_implicit_lower_bounds,
                             DimensionSet& a_implicit_upper_bounds) {
  // The implicit-bound state cannot fail, so compute it unconditionally
  // before propagating the interval bounds themselves.
  PropagateImplicitBoundState(b.rank(), b_implicit_lower_bounds,
                              b_implicit_upper_bounds, a_to_b, a.rank(),
                              a_implicit_lower_bounds, a_implicit_upper_bounds);
  absl::Status bounds_status = PropagateBounds(
      b, b_implicit_lower_bounds, b_implicit_upper_bounds, a_to_b, a);
  return bounds_status;
}
// Returns a copy of `a_to_b` whose input domain has been replaced by the
// bounds implied by `b_domain`, with its implicit-bound masks updated
// accordingly.
//
// A null `a_to_b` yields a freshly-allocated identity transform over
// `b_domain`.  Index array output maps have their `index_range` restricted
// to the values that map into `b_domain`; if the resulting input domain is
// explicitly empty, index array maps are replaced by constant maps.
Result<TransformRep::Ptr<>> PropagateBoundsToTransform(
    BoxView<> b_domain, DimensionSet b_implicit_lower_bounds,
    DimensionSet b_implicit_upper_bounds, TransformRep::Ptr<> a_to_b) {
  const DimensionIndex b_rank = b_domain.rank();
  if (!a_to_b) {
    // Construct an identity transform with `b_domain` as its input domain.
    a_to_b = TransformRep::Allocate(b_rank, b_rank);
    a_to_b->input_rank = a_to_b->output_rank = b_rank;
    SetToIdentityTransform(a_to_b->output_index_maps().first(b_rank));
    a_to_b->input_domain(b_rank).DeepAssign(b_domain);
    a_to_b->implicit_lower_bounds = b_implicit_lower_bounds;
    a_to_b->implicit_upper_bounds = b_implicit_upper_bounds;
    internal_index_space::DebugCheckInvariants(a_to_b.get());
    return a_to_b;
  }
  const DimensionIndex a_rank = a_to_b->input_rank;
  // Propagate into a temporary first, since `PropagateBounds` may fail and
  // we must not partially modify `a_to_b` in that case.
  Box<dynamic_rank(internal::kNumInlinedDims)> bounds_temp(a_rank);
  TENSORSTORE_RETURN_IF_ERROR(PropagateBounds(b_domain, b_implicit_lower_bounds,
                                              b_implicit_upper_bounds,
                                              a_to_b.get(), bounds_temp));
  // Obtain a mutable (uniquely-owned) representation before modifying it.
  a_to_b = MutableRep(std::move(a_to_b));
  a_to_b->input_domain(a_rank).DeepAssign(bounds_temp);
  PropagateImplicitBoundState(
      b_rank, b_implicit_lower_bounds, b_implicit_upper_bounds, a_to_b.get(),
      a_rank, a_to_b->implicit_lower_bounds, a_to_b->implicit_upper_bounds);
  const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(a_to_b.get());
  const auto output_index_maps = a_to_b->output_index_maps().first(b_rank);
  for (DimensionIndex b_dim = 0; b_dim < b_rank; ++b_dim) {
    auto& map = output_index_maps[b_dim];
    if (map.method() != OutputIndexMethod::array) continue;
    if (domain_is_explicitly_empty) {
      // No points remain in the domain, so the index array map can be
      // replaced by a trivial constant map.
      map.SetConstant();
      map.offset() = 0;
      map.stride() = 0;
      continue;
    }
    // Restrict the index array's allowed value range to values that map
    // within `b_domain[b_dim]` under `offset + stride * value`.
    auto& index_array_data = map.index_array_data();
    TENSORSTORE_ASSIGN_OR_RETURN(
        const IndexInterval propagated_bounds,
        GetAffineTransformDomain(
            OptionallyImplicitIndexInterval(b_domain[b_dim],
                                            b_implicit_lower_bounds[b_dim],
                                            b_implicit_upper_bounds[b_dim])
                .effective_interval(),
            map.offset(), map.stride()));
    index_array_data.index_range =
        Intersect(propagated_bounds, index_array_data.index_range);
  }
  internal_index_space::DebugCheckInvariants(a_to_b.get());
  return a_to_b;
}
// Convenience overload of `PropagateBoundsToTransform` that treats every
// bound of `b_domain` as explicit.
Result<TransformRep::Ptr<>> PropagateExplicitBoundsToTransform(
    BoxView<> b_domain, TransformRep::Ptr<> a_to_b) {
  const DimensionSet all_explicit(false);
  return PropagateBoundsToTransform(b_domain, all_explicit, all_explicit,
                                    std::move(a_to_b));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::PropagateBounds;
using ::tensorstore::PropagateBoundsToTransform;
using ::tensorstore::PropagateExplicitBounds;
using ::tensorstore::PropagateExplicitBoundsToTransform;
// A null (default-constructed) transform acts as the identity: the bounds
// of `b` are copied directly to `a`.
TEST(PropagateExplicitBoundsTest, IdentityTransform) {
  DimensionIndex rank = 2;
  const Box<> b({2, 3}, {4, 5});
  Box<> a(rank);
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, IndexTransform<>(), a));
  EXPECT_EQ(a, b);
}
// With a null transform, the implicit-bound masks also pass through
// unchanged.
TEST(PropagateBoundsTest, IdentityTransform) {
  auto b = Box({2, 3}, {4, 5});
  Box<2> a;
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  auto b_implicit_lower_bounds = DimensionSet::FromBools({0, 1});
  auto b_implicit_upper_bounds = DimensionSet::FromBools({1, 0});
  TENSORSTORE_ASSERT_OK(
      PropagateBounds(b, b_implicit_lower_bounds, b_implicit_upper_bounds,
                      IndexTransform<2, 2>(), a, a_implicit_lower_bounds,
                      a_implicit_upper_bounds));
  EXPECT_EQ(a, b);
  EXPECT_EQ(b_implicit_lower_bounds, a_implicit_lower_bounds);
  EXPECT_EQ(b_implicit_upper_bounds, a_implicit_upper_bounds);
}
// A fully-explicit input domain is validated but left unchanged, and the
// resulting masks are fully explicit.
TEST(PropagateBoundsTest, ValidateOnly) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 3})
                       .input_shape({5, 10})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<2> a;
  DimensionSet b_implicit_lower_bounds = DimensionSet::FromBools({0, 1, 0});
  DimensionSet b_implicit_upper_bounds = DimensionSet::FromBools({1, 0, 0});
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  TENSORSTORE_ASSERT_OK(PropagateBounds(
      b, b_implicit_lower_bounds, b_implicit_upper_bounds, transform, a,
      a_implicit_lower_bounds, a_implicit_upper_bounds));
  EXPECT_EQ(BoxView({2, 3}, {5, 10}), a);
  EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
  EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
// A constant output map whose offset lies within the effective output
// bounds passes validation.
TEST(PropagateBoundsTest, Constant) {
  auto transform = IndexTransformBuilder<0, 2>()
                       .output_constant(0, 1)
                       .output_constant(1, 2)
                       .Finalize()
                       .value();
  Box<0> a;
  TENSORSTORE_ASSERT_OK(PropagateBounds(
      Box({2, 1}, {2, 3}),
      DimensionSet::FromBools({1, 0}),
      DimensionSet::FromBools({0, 0}), transform,
      a));
}
// A constant output map whose offset lies outside the explicit output
// bounds fails with kOutOfRange.
TEST(PropagateBoundsTest, ConstantError) {
  auto transform = IndexTransformBuilder<0, 2>()
                       .output_constant(0, 1)
                       .output_constant(1, 2)
                       .Finalize()
                       .value();
  Box<0> a;
  EXPECT_THAT(PropagateBounds(
                  Box({2, 1}, {2, 3}),
                  DimensionSet::FromBools({0, 1}),
                  DimensionSet::FromBools({0, 0}),
                  transform, a),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            "Checking bounds of constant output index map for "
                            "dimension 0: Index 1 is outside valid range .*"));
}
// If the transform's explicit input domain is already empty, constant
// output maps are not bounds-checked.
TEST(PropagateBoundsTest, ConstantEmptyDomain) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
                                   (IndexTransformBuilder<2, 1>()
                                        .input_shape({0, 2})
                                        .output_constant(0, 42)
                                        .Finalize()));
  Box<2> a;
  TENSORSTORE_EXPECT_OK(PropagateBounds(
      Box<1>({5}),
      DimensionSet(),
      DimensionSet(), transform, a));
  EXPECT_EQ(a, BoxView({0, 2}));
}
// Propagated bounds replace only the implicit input bounds; explicit input
// bounds are retained.
TEST(PropagateBoundsTest, Propagate0Upper1Lower) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 3})
                       .implicit_lower_bounds({0, 1})
                       .implicit_upper_bounds({1, 0})
                       .input_shape({5, 10})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  Box<2> a;
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  TENSORSTORE_ASSERT_OK(PropagateBounds(
      Box({2, 3, 4}, {50, 66, 100}),
      DimensionSet::FromBools({0, 1, 1}),
      DimensionSet::FromBools({1, 0, 1}), transform,
      a, a_implicit_lower_bounds, a_implicit_upper_bounds));
  EXPECT_EQ(BoxView({2, -9}, {19 - 2, 13 - -9}), a);
  EXPECT_THAT(a_implicit_lower_bounds, DimensionSet::FromBools({0, 1}));
  EXPECT_THAT(a_implicit_upper_bounds, DimensionSet::FromBools({1, 0}));
}
// Implicit output bounds still propagate and combine with the transform's
// implicit input bounds.
TEST(PropagateBoundsTest, PropagateImplicitConstraints1) {
  const auto transform = IndexTransformBuilder<1, 1>()
                             .input_origin({-1})
                             .input_exclusive_max({2})
                             .implicit_upper_bounds({1})
                             .output_identity_transform()
                             .Finalize()
                             .value();
  Box<1> a;
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  TENSORSTORE_ASSERT_OK(
      PropagateBounds(Box({0}, {4}),
                      DimensionSet::FromBools({1}),
                      DimensionSet(), transform, a,
                      a_implicit_lower_bounds, a_implicit_upper_bounds));
  EXPECT_EQ(BoxView({-1}, {5}), a);
  EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
  EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
// Constraints from two output dimensions that map to the same input
// dimension are intersected, preferring explicit bounds.
TEST(PropagateBoundsTest, PropagateImplicitConstraints2) {
  const auto transform = IndexTransformBuilder<1, 2>()
                             .input_origin({-1})
                             .input_exclusive_max({2})
                             .implicit_upper_bounds({1})
                             .output_single_input_dimension(0, 0)
                             .output_single_input_dimension(1, 0)
                             .Finalize()
                             .value();
  Box<1> a;
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  TENSORSTORE_ASSERT_OK(PropagateBounds(
      Box({-1, 0}, {3, 4}),
      DimensionSet::FromBools({1, 1}),
      DimensionSet::FromBools({1, 0}), transform, a,
      a_implicit_lower_bounds, a_implicit_upper_bounds));
  EXPECT_EQ(BoxView({-1}, {5}), a);
  EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
  EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
// With a negative output stride, the output's bounds propagate to the
// input with the lower/upper roles reversed.
TEST(PropagateBoundsTest, PropagateNegativeStride) {
  auto transform = IndexTransformBuilder<2, 1>()
                       .input_origin({2, 3})
                       .implicit_lower_bounds({0, 1})
                       .implicit_upper_bounds({1, 0})
                       .input_shape({4, 10})
                       .output_single_input_dimension(0, 15, -2, 0)
                       .Finalize()
                       .value();
  const Box<1> b({2}, {50});
  Box<2> a;
  DimensionSet b_implicit_lower_bounds;
  DimensionSet b_implicit_upper_bounds = DimensionSet::FromBools({1});
  DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
  TENSORSTORE_ASSERT_OK(PropagateBounds(
      b, b_implicit_lower_bounds, b_implicit_upper_bounds, transform, a,
      a_implicit_lower_bounds, a_implicit_upper_bounds));
  EXPECT_EQ(BoxView({2, 3}, {7 - 2, 10}), a);
  EXPECT_THAT(a_implicit_lower_bounds, DimensionSet::FromBools({0, 1}));
  EXPECT_THAT(a_implicit_upper_bounds, DimensionSet::FromBools({0, 0}));
}
// Implicit input bounds of dimension 1 are replaced by the propagated
// bounds; the fully explicit bounds of dimension 0 are retained.
TEST(PropagateExplicitBoundsTest, Propagate0Upper1Upper) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 10})
                       .input_shape({5, 11})
                       .implicit_lower_bounds({0, 1})
                       .implicit_upper_bounds({0, 1})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<2> a;
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
  EXPECT_EQ(Box<>({2, -9}, {5, 22}), a);
}
// An input dimension (here dimension 2) not referenced by any output map
// keeps its existing explicit bounds.
TEST(PropagateExplicitBoundsTest, PropagateExtraExplicit) {
  auto transform = IndexTransformBuilder<3, 3>()
                       .input_origin({2, 10, 7})
                       .input_shape({5, 11, 8})
                       .implicit_lower_bounds({0, 1, 0})
                       .implicit_upper_bounds({0, 1, 0})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<3> a;
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
  EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
// Same, with the unreferenced dimension's lower bound implicit.
TEST(PropagateExplicitBoundsTest, PropagateExtraImplicitLower) {
  auto transform = IndexTransformBuilder<3, 3>()
                       .input_origin({2, 10, 7})
                       .input_shape({5, 11, 8})
                       .implicit_lower_bounds({0, 1, 1})
                       .implicit_upper_bounds({0, 1, 0})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<3> a;
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
  EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
// Same, with the unreferenced dimension's upper bound implicit.
TEST(PropagateExplicitBoundsTest, PropagateExtraImplicitUpper) {
  auto transform = IndexTransformBuilder<3, 3>()
                       .input_origin({2, 10, 7})
                       .input_shape({5, 11, 8})
                       .implicit_lower_bounds({0, 1, 0})
                       .implicit_upper_bounds({0, 1, 1})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<3> a;
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
  EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
// Propagated bounds incompatible with the existing explicit input bounds
// produce a kOutOfRange error.
TEST(PropagateExplicitBoundsTest, OutOfBounds) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 3})
                       .input_shape({5, 10})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 60, 100});
  Box<2> a;
  EXPECT_THAT(
      PropagateExplicitBounds(b, transform, a),
      MatchesStatus(
          absl::StatusCode::kOutOfRange,
          "Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
          "incompatible with existing bounds \\[3, 13\\), with size=10.*"));
}
// ...but no error is reported when the existing input domain is explicitly
// empty (the transform maps no points).
TEST(PropagateExplicitBoundsTest, OutOfBoundsEmptyDomain) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 3})
                       .input_shape({0, 10})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 60, 100});
  Box<2> a;
  TENSORSTORE_EXPECT_OK(PropagateExplicitBounds(b, transform, a));
}
// The error message renders an infinite existing lower bound as "(-inf".
TEST(PropagateExplicitBoundsTest, OutOfBoundsInfLower) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, -kInfIndex})
                       .input_shape({5, kInfIndex + 4})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 60, 100});
  Box<2> a;
  EXPECT_THAT(PropagateExplicitBounds(b, transform, a),
              MatchesStatus(
                  absl::StatusCode::kOutOfRange,
                  "Propagated bounds \\[-9, 11\\), with size=20, for dimension "
                  "1 are incompatible with existing bounds \\(-inf, 4\\).*"));
}
// The error message renders an infinite existing upper bound as "+inf)".
TEST(PropagateExplicitBoundsTest, OutOfBoundsInfUpper) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 2})
                       .input_shape({5, kInfIndex + 1 - 2})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 60, 100});
  Box<2> a;
  EXPECT_THAT(
      PropagateExplicitBounds(b, transform, a),
      MatchesStatus(
          absl::StatusCode::kOutOfRange,
          "Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
          "incompatible with existing bounds \\[2, \\+inf\\).*"));
}
// Integer overflow while inverting the affine output map is reported as
// kInvalidArgument.
TEST(PropagateExplicitBoundsTest, Overflow) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, -kInfIndex})
                       .input_shape({5, kInfIndex + 10})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 1, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, kMinFiniteIndex, 4}, {50, -kMinFiniteIndex + 69, 100});
  Box<2> a;
  EXPECT_THAT(PropagateExplicitBounds(b, transform, a),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Propagating bounds from dimension 1 to input "
                            "dimension 1: Integer overflow propagating .*"));
}
// A zero-size (empty) input dimension propagates without error and is
// preserved in the result.
TEST(PropagateExplicitBoundsTest, ZeroSize) {
  auto transform = IndexTransformBuilder<2, 3>()
                       .input_origin({2, 3})
                       .input_shape({5, 0})
                       .output_single_input_dimension(0, 15, 2, 0)
                       .output_single_input_dimension(1, 30, 3, 1)
                       .output_single_input_dimension(2, 45, 4, 1)
                       .Finalize()
                       .value();
  const Box<3> b({2, 3, 4}, {50, 66, 100});
  Box<2> a;
  TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
  EXPECT_EQ(BoxView({2, 3}, {5, 0}), a);
}
// A null transform yields an identity transform over the output domain
// with fully explicit bounds.
TEST(PropagateExplicitBoundsToTransformTest,
     InvalidTransformTreatedAsIdentityTransformDefaultImplicit) {
  IndexTransform<2, 2> t;
  Box<2> output_domain({1, 2}, {3, 4});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t_expected,
      IndexTransformBuilder(2, 2)
          .input_bounds(output_domain)
          .implicit_lower_bounds({0, 0})
          .implicit_upper_bounds({0, 0})
          .output_identity_transform()
          .Finalize());
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              ::testing::Optional(t_expected));
}
// Same, but the supplied implicit-bound masks are applied to the resulting
// identity transform.
TEST(PropagateBoundsToTransformTest,
     InvalidTransformTreatedAsIdentityTransformImplicit) {
  IndexTransform<2, 2> t;
  Box<2> output_domain({1, 2}, {3, 4});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t_expected,
      IndexTransformBuilder(2, 2)
          .input_bounds(output_domain)
          .implicit_lower_bounds({1, 0})
          .implicit_upper_bounds({0, 1})
          .output_identity_transform()
          .Finalize());
  EXPECT_THAT(
      PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1, 0}),
                                 DimensionSet::FromBools({0, 1}), t),
      ::testing::Optional(t_expected));
}
// An index array map whose index_range already fits within the propagated
// bounds is left unchanged.
TEST(PropagateExplicitBoundsToTransformTest, IndexArrayNoPropagationNeeded) {
  Box<1> output_domain({1}, {10});
  auto t = IndexTransformBuilder<1, 1>()
               .input_origin({11})
               .input_shape({3})
               .output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
                                   IndexInterval::Closed(1, 2))
               .Finalize()
               .value();
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              ::testing::Optional(t));
}
// When the propagated input domain is explicitly empty, index array maps
// are replaced by constant maps.
TEST(PropagateExplicitBoundsToTransformTest, IndexArrayZeroElements) {
  Box<2> output_domain({0, 2});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t,
      IndexTransformBuilder(2, 2)
          .input_shape({3, 2})
          .implicit_upper_bounds({1, 0})
          .output_single_input_dimension(0, 0)
          .output_index_array(1, 0, 1, MakeArray<Index>({{1, 2}}))
          .Finalize());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t_expected,
      IndexTransformBuilder(2, 2)
          .input_shape({0, 2})
          .output_single_input_dimension(0, 0)
          .output_constant(1, 0)
          .Finalize());
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              ::testing::Optional(t_expected));
}
// A single_input_dimension map over a fully explicit input domain requires
// no changes to the transform.
TEST(PropagateExplicitBoundsToTransformTest,
     SingleInputDimensionNoPropagationNeeded) {
  Box<1> output_domain({1}, {10});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t,
      IndexTransformBuilder(1, 1)
          .input_origin({11})
          .input_shape({3})
          .output_single_input_dimension(0, -32, 3, 0)
          .Finalize());
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              ::testing::Optional(t));
}
// The output bounds are propagated into an index array's index_range when
// none was previously specified.
TEST(PropagateExplicitBoundsToTransformTest, PropagateToIndexRange) {
  Box<1> output_domain({1}, {10});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t,
      IndexTransformBuilder(1, 1)
          .input_origin({11})
          .input_shape({3})
          .output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}))
          .Finalize());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto t_expected,
      IndexTransformBuilder(1, 1)
          .input_origin({11})
          .input_shape({3})
          .output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
                              IndexInterval::Closed(0, 2))
          .Finalize());
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              ::testing::Optional(t_expected));
}
// Implicit output bounds only constrain the index_range on the explicit
// side(s): an implicit side remains unbounded.
TEST(PropagateBoundsToTransformTest, PropagateToIndexRange) {
  Box<1> output_domain({1}, {10});
  const auto get_transform =
      [](tensorstore::Result<IndexInterval> index_range) {
        return IndexTransformBuilder<1, 1>()
            .input_origin({11})
            .input_shape({3})
            .output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
                                index_range)
            .Finalize()
            .value();
      };
  EXPECT_THAT(
      PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({0}),
                                 DimensionSet::FromBools({0}),
                                 get_transform(IndexInterval())),
      ::testing::Optional(get_transform(IndexInterval::Closed(0, 2))));
  EXPECT_THAT(
      PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1}),
                                 DimensionSet::FromBools({0}),
                                 get_transform(IndexInterval())),
      ::testing::Optional(get_transform(IndexInterval::Closed(-kInfIndex, 2))));
  EXPECT_THAT(
      PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({0}),
                                 DimensionSet::FromBools({1}),
                                 get_transform(IndexInterval())),
      ::testing::Optional(get_transform(IndexInterval::Closed(0, kInfIndex))));
  EXPECT_THAT(
      PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1}),
                                 DimensionSet::FromBools({1}),
                                 get_transform(IndexInterval())),
      ::testing::Optional(get_transform(IndexInterval())));
}
// Propagation fills in an unbounded (implicit) input domain, and the
// IndexDomain-based overload behaves equivalently.
TEST(PropagateBoundsToTransformTest, PropagateToInputDomain) {
  Box<1> output_bounds({1}, {10});
  auto t = IndexTransformBuilder<1, 1>()
               .implicit_lower_bounds({1})
               .implicit_upper_bounds({1})
               .output_single_input_dimension(0, -32, 3, 0)
               .Finalize()
               .value();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto propagated_transform,
      PropagateBoundsToTransform(output_bounds, DimensionSet::FromBools({1}),
                                 DimensionSet::FromBools({0}), t));
  auto expected_transform = IndexTransformBuilder<1, 1>()
                                .input_origin({11})
                                .input_shape({4})
                                .implicit_lower_bounds({1})
                                .implicit_upper_bounds({0})
                                .output_single_input_dimension(0, -32, 3, 0)
                                .Finalize()
                                .value();
  EXPECT_EQ(expected_transform, propagated_transform);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto output_domain,
                                   IndexDomainBuilder<1>()
                                       .bounds(output_bounds)
                                       .implicit_lower_bounds({1})
                                       .implicit_upper_bounds({0})
                                       .Finalize());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto propagated_transform2, PropagateBoundsToTransform(output_domain, t));
  EXPECT_EQ(expected_transform, propagated_transform2);
}
// Incompatible explicit bounds produce kOutOfRange, same as the
// box-returning overload.
TEST(PropagateExplicitBoundsToTransformTest, OutOfBounds) {
  auto t = IndexTransformBuilder<2, 3>()
               .input_origin({2, 3})
               .input_shape({5, 10})
               .output_single_input_dimension(0, 15, 2, 0)
               .output_single_input_dimension(1, 30, 3, 1)
               .output_single_input_dimension(2, 45, 4, 1)
               .Finalize()
               .value();
  const Box<3> output_domain({2, 3, 4}, {50, 60, 100});
  EXPECT_THAT(
      PropagateExplicitBoundsToTransform(output_domain, t),
      MatchesStatus(
          absl::StatusCode::kOutOfRange,
          "Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
          "incompatible with existing bounds \\[3, 13\\), with size=10.*"));
}
// Integer overflow while propagating produces kInvalidArgument.
TEST(PropagateExplicitBoundsToTransformTest, Overflow) {
  auto t = IndexTransformBuilder<2, 3>()
               .input_origin({2, -kInfIndex})
               .input_shape({5, kInfIndex + 10})
               .output_single_input_dimension(0, 15, 2, 0)
               .output_single_input_dimension(1, 30, 1, 1)
               .output_single_input_dimension(2, 45, 4, 1)
               .Finalize()
               .value();
  const Box<3> output_domain({2, kMinFiniteIndex, 4},
                             {50, -kMinFiniteIndex + 69, 100});
  EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Propagating bounds from dimension 1 to input "
                            "dimension 1: Integer overflow propagating .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/propagate_bounds.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/propagate_bounds_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a8dce167-0d38-46a8-a3e5-9d04905364a8 | cpp | google/tensorstore | mark_explicit_op | tensorstore/index_space/internal/mark_explicit_op.cc | tensorstore/index_space/mark_explicit_op_test.cc | #include "tensorstore/index_space/internal/mark_explicit_op.h"
#include "absl/status/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
// Sets or clears the implicit-bound flags of the selected input dimensions
// of `transform`.
//
// \param transform Transform to modify (consumed).
// \param dimensions Input dimensions whose bound state is changed.
// \param implicit New value of the affected implicit flags.
// \param lower If `true`, change each dimension's lower-bound flag.
// \param upper If `true`, change each dimension's upper-bound flag.
// \param domain_only Passed through to `MutableRep`; presumably requests a
//     domain-only copy of the representation -- TODO confirm.
Result<IndexTransform<>> ApplyChangeImplicitState(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool implicit,
    bool lower, bool upper, bool domain_only) {
  if (!lower && !upper) {
    // Neither flag is being changed; nothing to do.
    return transform;
  }
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  if (implicit) {
    // Marking bounds implicit is disallowed for any dimension that indexes
    // an index array output map (i.e. has a non-zero byte stride in the
    // array), since the array only covers the current domain.
    for (DimensionIndex output_dim = 0, output_rank = rep->output_rank;
         output_dim < output_rank; ++output_dim) {
      auto& map = rep->output_index_maps()[output_dim];
      if (map.method() != OutputIndexMethod::array) continue;
      auto& index_array_data = map.index_array_data();
      for (DimensionIndex input_dim : *dimensions) {
        if (index_array_data.byte_strides[input_dim] != 0) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Cannot mark input dimension ", input_dim,
              " as having implicit bounds because it indexes the index array "
              "map for output dimension ",
              output_dim));
        }
      }
    }
  }
  // Apply the new implicit state to the requested bound(s) of each
  // selected dimension.
  for (DimensionIndex input_dim : *dimensions) {
    const auto d = rep->input_dimension(input_dim);
    if (lower) d.implicit_lower_bound() = implicit;
    if (upper) d.implicit_upper_bound() = implicit;
  }
  if (!implicit && IsDomainExplicitlyEmpty(rep.get())) {
    // Once the domain is explicitly empty, index array maps cannot be
    // evaluated and are replaced by constant maps.
    ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
  }
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionErrorTransformOnly;
TEST(MarkBoundsExplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims("x", "z").MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{});
}
TEST(MarkBoundsExplicitTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({0, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.output_constant(0, 0)
.Finalize()
.value(),
{});
}
TEST(UnsafeMarkBoundsImplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(
original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(original_transform,
Dims("x", "z").UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).UnsafeMarkBoundsImplicit(false, true),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArrayInvalid) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).UnsafeMarkBoundsImplicit(false, true),
absl::StatusCode::kInvalidArgument,
"Cannot mark input dimension 1 as having implicit bounds because it "
"indexes the index array map for output dimension 0",
IndexDomainBuilder(2)
.shape({2, 3})
.implicit_upper_bounds({0, 1})
.Finalize()
.value());
}
TEST(MarkBoundsExplicitTest, LowerOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, false),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, UpperOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({0, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(false, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
// Requesting neither the lower nor the upper bound (`MarkBoundsExplicit(false,
// false)`) must leave the transform completely unchanged.
TEST(MarkBoundsExplicitTest, None) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .implicit_lower_bounds({0, 1, 1})
                                      .implicit_upper_bounds({1, 0, 0})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  TestDimExpression(original_transform,
                    Dims(0, 2).MarkBoundsExplicit(false, false),
                    {0, 2},
                    original_transform,
                    original_transform,
                    {});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/mark_explicit_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/mark_explicit_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1ee891c3-4dbe-4905-bfd8-607c15821558 | cpp | google/tensorstore | inverse_transform | tensorstore/index_space/internal/inverse_transform.cc | tensorstore/index_space/inverse_transform_test.cc | #include "tensorstore/index_space/internal/inverse_transform.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
/// Computes the inverse of `transform`, if it exists.
///
/// A transform is invertible only if every output index map is either a
/// constant map (with a finite offset) or a single-input-dimension map with
/// stride +/-1, each input dimension is referenced by at most one output
/// dimension, and every unreferenced input dimension has an explicit
/// singleton domain.
///
/// \param transform May be null, in which case a null pointer is returned.
/// \returns The inverse representation, or `absl::InvalidArgumentError` if
///     the transform is not invertible or integer overflow occurs.
Result<TransformRep::Ptr<>> InverseTransform(TransformRep* transform) {
  if (!transform) {
    return TransformRep::Ptr<>();
  }
  const DimensionIndex input_rank = transform->input_rank;
  const DimensionIndex output_rank = transform->output_rank;
  // The inverse swaps the roles of input and output rank.
  auto new_transform = TransformRep::Allocate(output_rank, input_rank);
  new_transform->input_rank = output_rank;
  new_transform->output_rank = input_rank;
  new_transform->implicit_lower_bounds = false;
  new_transform->implicit_upper_bounds = false;
  const auto maps = transform->output_index_maps().first(output_rank);
  const auto new_maps = new_transform->output_index_maps().first(input_rank);
  // Each output dimension of `transform` becomes input dimension `output_dim`
  // of the inverse; fill in its domain and the corresponding inverse map.
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& map = maps[output_dim];
    const auto new_d = new_transform->input_dimension(output_dim);
    switch (map.method()) {
      case OutputIndexMethod::array:
        // Index array maps are never invertible.
        return absl::InvalidArgumentError(tensorstore::StrCat(
            "Transform is not invertible due to index array "
            "map for output dimension ",
            output_dim));
      case OutputIndexMethod::constant: {
        // A constant output map inverts to a singleton input domain at the
        // offset; the offset must be finite to form a valid interval.
        if (!IsFiniteIndex(map.offset())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Transform is not invertible due to offset ", map.offset(),
              " outside valid range ", IndexInterval::FiniteRange(),
              " for output dimension ", output_dim));
        }
        new_d.domain() = IndexInterval::UncheckedSized(map.offset(), 1);
        new_d.implicit_lower_bound() = false;
        new_d.implicit_upper_bound() = false;
        break;
      }
      case OutputIndexMethod::single_input_dimension: {
        // Only unit-magnitude strides are invertible over the integers.
        if (map.stride() != 1 && map.stride() != -1) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Transform is not invertible due to "
              "stride of ",
              map.stride(), " for output dimension ", output_dim));
        }
        const DimensionIndex input_dim = map.input_dimension();
        auto& new_map = new_maps[input_dim];
        // If this inverse map was already set, two output dimensions map from
        // the same input dimension (a diagonal), which is not invertible.
        if (new_map.method() == OutputIndexMethod::single_input_dimension) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Transform is not invertible because input dimension ", input_dim,
              " maps to output dimensions ", new_map.input_dimension(), " and ",
              output_dim));
        }
        new_map.SetSingleInputDimension(output_dim);
        // Propagate the input domain through `offset + stride * x` to obtain
        // the inverse input domain (may overflow -> error).
        auto new_domain_result = GetAffineTransformRange(
            transform->input_dimension(input_dim).optionally_implicit_domain(),
            map.offset(), map.stride());
        if (!new_domain_result.ok()) {
          return MaybeAnnotateStatus(
              new_domain_result.status(),
              tensorstore::StrCat("Error inverting map from input dimension ",
                                  input_dim, " -> output dimension ",
                                  output_dim));
        }
        // `-map.offset()` below would overflow for the minimum Index value.
        if (map.offset() == std::numeric_limits<Index>::min()) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow occurred while inverting map from "
              "input dimension ",
              input_dim, " -> output dimension ", output_dim));
        }
        // Inverse of y = offset + stride * x (stride = +/-1) is
        // x = -offset * stride + stride * y.
        new_map.offset() = -map.offset() * map.stride();
        new_map.stride() = map.stride();
        new_d.domain() = new_domain_result->interval();
        new_d.label() = transform->input_dimension(input_dim).label();
        new_d.implicit_lower_bound() = new_domain_result->implicit_lower();
        new_d.implicit_upper_bound() = new_domain_result->implicit_upper();
        break;
      }
    }
  }
  // Input dimensions of `transform` not referenced by any output map must be
  // explicit singletons; they invert to constant output maps.
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    auto& new_map = new_maps[input_dim];
    if (new_map.method() == OutputIndexMethod::single_input_dimension) {
      continue;
    }
    auto input_domain =
        transform->input_dimension(input_dim).optionally_implicit_domain();
    if (input_domain.implicit_lower() || input_domain.implicit_upper() ||
        input_domain.size() != 1) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Transform is not invertible due to non-singleton input dimension ",
          input_dim, " with domain ", input_domain,
          " that is not mapped by an output dimension"));
    }
    new_map.offset() = input_domain.inclusive_min();
    new_map.stride() = 0;
  }
  internal_index_space::DebugCheckInvariants(new_transform.get());
  return new_transform;
}
}
} | #include <cstddef>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::InverseTransform;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
// Inverting a default-constructed (null) transform succeeds and yields a null
// transform, for both dynamic-rank and static-rank transforms.
TEST(InverseTransformTest, Null) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv,
                                   InverseTransform(IndexTransform<>()));
  EXPECT_FALSE(inv.valid());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_static,
                                   InverseTransform(IndexTransform<3, 3>()));
  EXPECT_FALSE(inv_static.valid());
}
TEST(InverseTransformTest, Example) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(3, 3)
.input_labels({"x", "", "y"})
.input_origin({1, 3, 2})
.input_exclusive_max({5, 4, 8})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.output_single_input_dimension(0, 5, -1, 2)
.output_single_input_dimension(1, 3, 1, 0)
.output_constant(2, 7)
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_inv,
IndexTransformBuilder(3, 3)
.input_labels({"y", "x", ""})
.input_origin({-2, 4, 7})
.input_exclusive_max({4, 8, 8})
.implicit_lower_bounds({1, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.output_single_input_dimension(0, -3, 1, 1)
.output_constant(1, 3)
.output_single_input_dimension(2, 5, -1, 0)
.Finalize());
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, IdentityRank3) {
auto t =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_identity_transform()
.Finalize()
.value();
EXPECT_EQ(t, InverseTransform(t));
}
TEST(InverseTransformTest, Offsets) {
auto t =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 6, 1, 0)
.output_single_input_dimension(1, 7, 1, 1)
.output_single_input_dimension(2, 8, 1, 2)
.Finalize()
.value();
auto expected_inv =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, 1, 0)
.output_single_input_dimension(1, -7, 1, 1)
.output_single_input_dimension(2, -8, 1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, Strides) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 0, -1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -1, 2)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({-12, 4, -16})
.input_shape({10, 11, 12})
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.output_single_input_dimension(0, 0, -1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, Permutation) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"y", "z", "x"})
.input_origin({4, 5, 3})
.input_shape({11, 12, 10})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 0})
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, OffsetsAndStrides) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, -1, 0)
.output_single_input_dimension(1, -7, 1, 1)
.output_single_input_dimension(2, -8, -1, 2)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({-24, 4, -32})
.input_shape({10, 11, 12})
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.output_single_input_dimension(0, -6, -1, 0)
.output_single_input_dimension(1, 7, 1, 1)
.output_single_input_dimension(2, -8, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, OffsetsAndStridesAndPermutation) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, -1, 1)
.output_single_input_dimension(1, -7, 1, 2)
.output_single_input_dimension(2, -8, -1, 0)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"y", "z", "x"})
.input_origin({-27, 6, -26})
.input_shape({11, 12, 10})
.implicit_lower_bounds({1, 1, 0})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(1, -6, -1, 0)
.output_single_input_dimension(2, 7, 1, 1)
.output_single_input_dimension(0, -8, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, ErrorNonSingletonUnmappedInputDimension) {
EXPECT_THAT(
InverseTransform(IndexTransformBuilder<>(3, 2)
.output_identity_transform()
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 2 with domain \\(-inf\\*, \\+inf\\*\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({2})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0, 2\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({1})
.implicit_lower_bounds({1})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0\\*, 1\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({1})
.implicit_upper_bounds({1})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0, 1\\*\\) "
"that is not mapped by an output dimension"));
}
TEST(InverseTransformTest, ConstantMap) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, IndexTransformBuilder(0, 1).output_constant(0, 42).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_inv,
IndexTransformBuilder(1, 0)
.input_origin({42})
.input_shape({1})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_inv_with_label,
IndexTransformBuilder(1, 0)
.input_origin({42})
.input_labels({"x"})
.input_shape({1})
.Finalize());
EXPECT_THAT(InverseTransform(t), ::testing::Optional(expected_inv));
EXPECT_THAT(InverseTransform(expected_inv), ::testing::Optional(t));
EXPECT_THAT(InverseTransform(expected_inv_with_label),
::testing::Optional(t));
}
TEST(InverseTransformTest, IndexArrayMap) {
EXPECT_THAT(InverseTransform(
IndexTransformBuilder<>(1, 1)
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1}))
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to "
"index array map for output dimension 0"));
}
TEST(InverseTransformTest, NonUnitStride) {
EXPECT_THAT(InverseTransform(IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to stride of 2 "
"for output dimension 0"));
}
TEST(InverseTransformTest, Diagonal) {
EXPECT_THAT(InverseTransform(IndexTransformBuilder<>(2, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible because input "
"dimension 0 maps to output dimensions 0 and 1"));
}
TEST(InverseTransformTest, DomainOverflow) {
EXPECT_THAT(InverseTransform(
IndexTransformBuilder<>(1, 1)
.input_origin({10})
.input_shape({5})
.output_single_input_dimension(0, kMaxFiniteIndex, 1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error inverting map from input dimension 0 -> "
"output dimension 0: Integer overflow .*"));
}
TEST(InverseTransformTest, OffsetOverflow) {
EXPECT_THAT(
InverseTransform(IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(
0, std::numeric_limits<Index>::min(), 1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Integer overflow occurred while inverting map from input "
"dimension 0 -> output dimension 0"));
}
TEST(InverseTransformTest, RandomFromOutputSpace) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_INVERSE_TRANSFORM_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder(box.rank()).bounds(box).Finalize());
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_inv_transform,
InverseTransform(inv_transform));
EXPECT_EQ(transform, inv_inv_transform);
}
}
TEST(InverseTransformTest, RandomFromInputSpace) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_INVERSE_TRANSFORM_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder(box.rank()).bounds(box).Finalize());
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, domain);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_inv_transform,
InverseTransform(inv_transform));
EXPECT_EQ(transform, inv_inv_transform);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/inverse_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/inverse_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6a2fe14c-ebdc-42e6-b9c5-437c37204503 | cpp | google/tensorstore | transform_rep | tensorstore/index_space/internal/transform_rep.cc | tensorstore/index_space/transform_rep_test.cc | #include "tensorstore/index_space/internal/transform_rep.h"
#include <memory>
#include <new>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Destroys and deallocates `data`.  IndexArrayData blocks are allocated with
// `std::malloc`/`std::realloc` (see `OutputIndexMap::SetArrayIndexing`), so
// they must be destroyed explicitly and released with `std::free`.
void FreeIndexArrayData(IndexArrayData* data) {
  std::destroy_at(data);
  std::free(data);
}
// Copies the trivially-copyable domain fields (ranks, origin, shape, implicit
// bound bitsets) from `source` to `dest`.  Does not copy labels or output
// index maps.
void CopyTrivialFields(TransformRep* source, TransformRep* dest) {
  assert(dest->input_rank_capacity >= source->input_rank &&
         dest->output_rank_capacity >= source->output_rank);
  const DimensionIndex input_rank = dest->input_rank = source->input_rank;
  dest->output_rank = source->output_rank;
  std::copy_n(source->input_origin().begin(), input_rank,
              dest->input_origin().begin());
  std::copy_n(source->input_shape().begin(), input_rank,
              dest->input_shape().begin());
  dest->implicit_lower_bounds = source->implicit_lower_bounds;
  dest->implicit_upper_bounds = source->implicit_upper_bounds;
}
}
// Copies the input dimension labels from `source` to `dest`.
//
// If `can_move` is `true`, the labels of `source` may be moved from (left in
// a valid but unspecified state); otherwise they are copied.
void CopyInputLabels(TransformRep* source, TransformRep* dest, bool can_move) {
  assert(dest->input_rank_capacity >= source->input_rank);
  const DimensionIndex rank = source->input_rank;
  auto src_first = source->input_labels().begin();
  auto dst_first = dest->input_labels().begin();
  if (can_move) {
    std::move(src_first, src_first + rank, dst_first);
  } else {
    std::copy(src_first, src_first + rank, dst_first);
  }
}
// `value_` is a tagged representation of the map kind (as evidenced by the
// three setters below): 0 means constant, `(input_dim << 1) | 1` means
// single-input-dimension, and an (aligned, even) `IndexArrayData*` pointer
// means array indexing.
// Converts this map to a constant map, releasing any index array storage.
void OutputIndexMap::SetConstant() {
  if (method() == OutputIndexMethod::array) {
    FreeIndexArrayData(&index_array_data());
  }
  value_ = 0;
}
// Converts this map to a single-input-dimension map for `input_dim`,
// releasing any index array storage.
void OutputIndexMap::SetSingleInputDimension(DimensionIndex input_dim) {
  if (method() == OutputIndexMethod::array) {
    FreeIndexArrayData(&index_array_data());
  }
  // Tag bit 1 marks the single-input-dimension encoding.
  value_ = (input_dim << 1) | 1;
}
// Converts this map to an array map with capacity for `rank` byte strides,
// reusing the existing allocation when it is already large enough.  The
// element pointer and index range of an existing array map are preserved.
// Throws/aborts via TENSORSTORE_THROW_BAD_ALLOC on allocation failure.
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank) {
  IndexArrayData* data;
  if (method() == OutputIndexMethod::array) {
    data = &index_array_data();
    if (data->rank_capacity >= rank) return *data;
    // Growing: save the non-trivial members, destroy the object, then
    // realloc the raw block.  If realloc fails, the original block is still
    // valid, so the object is reconstructed in place before reporting the
    // allocation failure.
    SharedElementPointer<const Index> element_pointer =
        std::move(data->element_pointer);
    auto bounds = data->index_range;
    std::destroy_at(data);
    IndexArrayData* new_data = static_cast<IndexArrayData*>(
        std::realloc(static_cast<void*>(data),
                     sizeof(IndexArrayData) + sizeof(Index) * rank));
    if (new_data) data = new_data;
    new (data) IndexArrayData;
    data->element_pointer = std::move(element_pointer);
    data->index_range = bounds;
    if (!new_data) TENSORSTORE_THROW_BAD_ALLOC;
    data->rank_capacity = rank;
  } else {
    // Fresh allocation: header plus `rank` byte-stride slots.
    data = static_cast<IndexArrayData*>(
        std::malloc(sizeof(IndexArrayData) + sizeof(Index) * rank));
    if (!data) {
      TENSORSTORE_THROW_BAD_ALLOC;
    }
    new (data) IndexArrayData;
    data->rank_capacity = rank;
  }
  // Store the pointer as the tagged value (pointer tag == array method).
  value_ = reinterpret_cast<std::uintptr_t>(data);
  return *data;
}
// Converts this map to an array map that copies the element pointer, index
// range, and first `rank` byte strides from `other`.
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank,
                                                 const IndexArrayData& other) {
  assert(other.rank_capacity >= rank);
  auto& data = SetArrayIndexing(rank);
  data.element_pointer = other.element_pointer;
  data.index_range = other.index_range;
  std::memcpy(data.byte_strides, other.byte_strides, sizeof(Index) * rank);
  return data;
}
// Assigns this map from `other`, deep-copying index array state when
// present; `rank` bounds the number of byte strides copied.
void OutputIndexMap::Assign(DimensionIndex rank, const OutputIndexMap& other) {
  if (other.method() == OutputIndexMethod::array) {
    SetArrayIndexing(rank, other.index_array_data());
  } else {
    value_ = other.value_;
  }
  offset_ = other.offset_;
  stride_ = other.stride_;
}
// Allocates a TransformRep with the given capacities in a single block.
//
// Memory layout of the block (as built below): the `output_rank_capacity`
// OutputIndexMap slots come FIRST, followed by the TransformRep header, then
// per-input-dimension storage (two Index arrays for origin/shape plus the
// label strings).  The returned pointer refers to the header, not the start
// of the block, which is why `Free` recovers the block start via
// `output_index_maps().data()`.
TransformRep::Ptr<> TransformRep::Allocate(
    DimensionIndex input_rank_capacity, DimensionIndex output_rank_capacity) {
  ABSL_CHECK(input_rank_capacity >= 0 && output_rank_capacity >= 0 &&
             input_rank_capacity <= kMaxRank &&
             output_rank_capacity <= kMaxRank);
  // Header + maps + (origin, shape, label) storage per input dimension.
  const size_t total_size =
      sizeof(TransformRep) +
      sizeof(OutputIndexMap) * output_rank_capacity +
      input_rank_capacity * (sizeof(Index) * 2 + sizeof(std::string));
  char* base_ptr = static_cast<char*>(::operator new(total_size));
  // Placement-construct the header after the output index map slots.
  TransformRep* ptr =
      new (base_ptr + sizeof(OutputIndexMap) * output_rank_capacity)
          TransformRep;
  ptr->reference_count.store(1, std::memory_order_relaxed);
  ptr->input_rank_capacity = input_rank_capacity;
  ptr->output_rank_capacity = output_rank_capacity;
  // Construct the non-trivial members that live in the raw block.
  std::uninitialized_default_construct_n(ptr->output_index_maps().begin(),
                                         output_rank_capacity);
  std::uninitialized_default_construct_n(ptr->input_labels().begin(),
                                         input_rank_capacity);
  return TransformRep::Ptr<>(ptr, internal::adopt_object_ref);
}
// Destroys the label strings (constructed in `Allocate`) without freeing the
// block.
void DestroyLabelFields(TransformRep* ptr) {
  std::destroy_n(ptr->input_labels().begin(), ptr->input_rank_capacity);
}
// Destroys all placement-constructed members and releases the block.  Must
// only be called once the reference count has dropped to zero.
void TransformRep::Free(TransformRep* ptr) {
  assert(ptr->reference_count == 0);
  DestroyLabelFields(ptr);
  std::destroy_n(ptr->output_index_maps().begin(), ptr->output_rank_capacity);
  // The output index maps precede the header, so their data pointer is the
  // start of the allocation made in `Allocate`.
  ::operator delete(static_cast<void*>(ptr->output_index_maps().data()));
}
// Deep-copies `source` into `dest` (domain, labels, and output index maps).
// `dest` must have sufficient input and output rank capacity.
void CopyTransformRep(TransformRep* source, TransformRep* dest) {
  assert(source != nullptr);
  assert(dest != nullptr);
  assert(dest->output_rank_capacity >= source->output_rank);
  CopyTransformRepDomain(source, dest);
  const DimensionIndex input_rank = source->input_rank;
  const DimensionIndex output_rank = dest->output_rank = source->output_rank;
  span<const OutputIndexMap> source_maps =
      source->output_index_maps().first(output_rank);
  span<OutputIndexMap> dest_maps = dest->output_index_maps().first(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    // `Assign` deep-copies index array state when present.
    dest_maps[output_dim].Assign(input_rank, source_maps[output_dim]);
  }
}
// Copies only the domain portion of `source` into `dest`: rank, origin,
// shape, implicit-bound bitsets, and labels.  Output index maps are left
// untouched.
void CopyTransformRepDomain(TransformRep* source, TransformRep* dest) {
  assert(source != nullptr);
  assert(dest != nullptr);
  assert(dest->input_rank_capacity >= source->input_rank);
  const DimensionIndex input_rank = dest->input_rank = source->input_rank;
  std::copy_n(source->input_origin().begin(), input_rank,
              dest->input_origin().begin());
  std::copy_n(source->input_shape().begin(), input_rank,
              dest->input_shape().begin());
  dest->implicit_lower_bounds = source->implicit_lower_bounds;
  dest->implicit_upper_bounds = source->implicit_upper_bounds;
  std::copy_n(source->input_labels().begin(), input_rank,
              dest->input_labels().begin());
}
// Moves `source` into `dest`: trivial fields are copied, while output index
// maps and labels are moved from (leaving `source`'s non-trivial members in a
// moved-from state).
void MoveTransformRep(TransformRep* source, TransformRep* dest) {
  CopyTrivialFields(source, dest);
  std::copy_n(std::make_move_iterator(source->output_index_maps().begin()),
              source->output_rank, dest->output_index_maps().begin());
  CopyInputLabels(source, dest, /*can_move=*/true);
}
// Resets every output index map of `ptr` to a constant map (releasing any
// index array storage) and sets the output rank to 0.
void ResetOutputIndexMaps(TransformRep* ptr) {
  const DimensionIndex output_rank = ptr->output_rank;
  auto maps = ptr->output_index_maps();
  for (DimensionIndex i = 0; i < output_rank; ++i) {
    maps[i].SetConstant();
  }
  ptr->output_rank = 0;
}
// Returns a uniquely-owned representation equivalent to `ptr` (copy-on-write).
// If `ptr` is already unique it is returned as-is; otherwise a copy is made.
// If `domain_only` is `true`, the result has no output index maps.
TransformRep::Ptr<> MutableRep(TransformRep::Ptr<> ptr, bool domain_only) {
  if (!ptr) return ptr;
  if (ptr->is_unique()) {
    if (domain_only) {
      ResetOutputIndexMaps(ptr.get());
      // NOTE(review): `ResetOutputIndexMaps` already sets `output_rank = 0`,
      // so this assignment appears redundant (harmless).
      ptr->output_rank = 0;
    }
    return ptr;
  }
  if (domain_only) {
    // Shared: copy just the domain into a rep with zero output capacity.
    auto new_rep = TransformRep::Allocate(ptr->input_rank, 0);
    CopyTransformRepDomain(ptr.get(), new_rep.get());
    new_rep->output_rank = 0;
    internal_index_space::DebugCheckInvariants(new_rep.get());
    return new_rep;
  } else {
    // Shared: make a full deep copy.
    auto new_rep = TransformRep::Allocate(ptr->input_rank, ptr->output_rank);
    CopyTransformRep(ptr.get(), new_rep.get());
    internal_index_space::DebugCheckInvariants(new_rep.get());
    return new_rep;
  }
}
// Returns a representation with at least the given capacities: reuses `ptr`
// when it is unique and large enough (resetting its output maps if
// `domain_only`), and otherwise allocates a fresh, uninitialized rep.
TransformRep::Ptr<> NewOrMutableRep(TransformRep* ptr,
                                    DimensionIndex input_rank_capacity,
                                    DimensionIndex output_rank_capacity,
                                    bool domain_only) {
  assert(ptr);
  if (ptr->input_rank_capacity >= input_rank_capacity &&
      ptr->output_rank_capacity >= output_rank_capacity && ptr->is_unique()) {
    if (domain_only) {
      ResetOutputIndexMaps(ptr);
    }
    return TransformRep::Ptr<>(ptr);
  } else {
    return TransformRep::Allocate(input_rank_capacity,
                                  domain_only ? 0 : output_rank_capacity);
  }
}
// Returns `true` if any input dimension of `ptr` has an explicit (not
// implicit) zero-size extent, i.e. the domain is unconditionally empty.
bool IsDomainExplicitlyEmpty(TransformRep* ptr) {
  const DimensionSet implicit_dims = ptr->implicit_dimensions();
  const Index* shape = ptr->input_shape().data();
  const DimensionIndex rank = ptr->input_rank;
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    // A zero extent only makes the domain definitively empty when the
    // dimension's bounds are explicit.
    if (shape[dim] == 0 && !implicit_dims[dim]) return true;
  }
  return false;
}
// Converts every index-array output map of `ptr` into a constant map with
// zero offset and stride; other map kinds are left untouched.
void ReplaceAllIndexArrayMapsWithConstantMaps(TransformRep* ptr) {
  const DimensionIndex output_rank = ptr->output_rank;
  auto maps = ptr->output_index_maps();
  for (DimensionIndex output_dim = 0; output_dim < output_rank;
       ++output_dim) {
    auto& map = maps[output_dim];
    if (map.method() == OutputIndexMethod::array) {
      map.SetConstant();
      map.offset() = 0;
      map.stride() = 0;
    }
  }
}
// Compares two output index maps for equality over `input_domain`.
//
// The method and offset must always match.  Constant maps are then equal;
// single-input-dimension maps additionally require the same input dimension
// and stride; index array maps require equal strides and index ranges and
// are compared element-wise over `input_domain` (so arrays with different
// byte strides may still compare equal if they hold the same values).
bool AreIndexMapsEqual(const OutputIndexMap& a, const OutputIndexMap& b,
                       BoxView<> input_domain) {
  const auto method = a.method();
  if (method != b.method() || a.offset() != b.offset()) return false;
  switch (method) {
    case OutputIndexMethod::constant:
      return true;
    case OutputIndexMethod::single_input_dimension:
      return a.input_dimension() == b.input_dimension() &&
             a.stride() == b.stride();
    case OutputIndexMethod::array: {
      const auto& index_array_data_a = a.index_array_data();
      const auto& index_array_data_b = b.index_array_data();
      if (a.stride() != b.stride()) return false;
      if (index_array_data_a.index_range != index_array_data_b.index_range) {
        return false;
      }
      // View each index array through the transform's input domain with its
      // stored byte strides and compare element-wise.
      return ArrayView<const Index, dynamic_rank, offset_origin>(
                 index_array_data_a.element_pointer,
                 StridedLayoutView<dynamic_rank, offset_origin>(
                     input_domain.rank(), input_domain.origin().data(),
                     input_domain.shape().data(),
                     index_array_data_a.byte_strides)) ==
             ArrayView<const Index, dynamic_rank, offset_origin>(
                 index_array_data_b.element_pointer,
                 StridedLayoutView<dynamic_rank, offset_origin>(
                     input_domain.rank(), input_domain.origin().data(),
                     input_domain.shape().data(),
                     index_array_data_b.byte_strides));
    }
  }
  ABSL_UNREACHABLE();
}
// Compares the domains (rank, bounds, implicit-bound flags, labels) of two
// representations.  A null pointer compares equal only to another null.
bool AreDomainsEqual(TransformRep* a, TransformRep* b) {
  if (!a || !b) return !a && !b;
  const DimensionIndex input_rank = a->input_rank;
  if (input_rank != b->input_rank) return false;
  if (a->input_domain(input_rank) != b->input_domain(input_rank)) {
    return false;
  }
  if (a->implicit_lower_bounds != b->implicit_lower_bounds) return false;
  if (a->implicit_upper_bounds != b->implicit_upper_bounds) return false;
  span<const std::string> labels_a = a->input_labels().first(input_rank);
  return std::equal(labels_a.begin(), labels_a.end(),
                    b->input_labels().begin());
}
// Compares two representations for full equality: equal domains plus
// pairwise-equal output index maps (see `AreIndexMapsEqual`).  Nulls
// compare equal only to each other.
bool AreEqual(TransformRep* a, TransformRep* b) {
  if (!AreDomainsEqual(a, b)) return false;
  if (!a) return true;
  const DimensionIndex output_rank = a->output_rank;
  if (output_rank != b->output_rank) return false;
  const BoxView<> domain_a = a->input_domain(a->input_rank);
  span<const OutputIndexMap> a_maps = a->output_index_maps().first(output_rank);
  span<const OutputIndexMap> b_maps = b->output_index_maps().first(output_rank);
  return std::equal(a_maps.begin(), a_maps.end(), b_maps.begin(),
                    [&](const OutputIndexMap& map_a,
                        const OutputIndexMap& map_b) {
                      return AreIndexMapsEqual(map_a, map_b, domain_a);
                    });
}
// Prints a human-readable, multi-line representation of `transform`:
// the ranks, the input domain (one line per dimension, with the label if
// non-empty), and each output index map.  A null `transform` prints
// `<Invalid index space transform>`.
void PrintToOstream(std::ostream& os, TransformRep* transform) {
  if (!transform) {
    os << "<Invalid index space transform>";
    return;
  }
  const DimensionIndex input_rank = transform->input_rank;
  const DimensionIndex output_rank = transform->output_rank;
  os << "Rank " << transform->input_rank << " -> " << transform->output_rank
     << " index space transform:\n";
  os << "  Input domain:\n";
  const BoxView<> input_domain = transform->input_domain(input_rank);
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    const auto d = transform->input_dimension(input_dim);
    os << "    " << input_dim << ": " << d.optionally_implicit_domain();
    if (!d.label().empty()) {
      os << " " << QuoteString(d.label());
    }
    os << '\n';
  }
  span<const OutputIndexMap> maps =
      transform->output_index_maps().first(output_rank);
  Index index_array_shape[kMaxRank];
  os << "  Output index maps:\n";
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& map = maps[output_dim];
    os << "    out[" << output_dim << "] = " << map.offset();
    if (map.method() != OutputIndexMethod::constant) {
      os << " + " << map.stride() << " * ";
    }
    switch (map.method()) {
      case OutputIndexMethod::constant:
        break;
      case OutputIndexMethod::single_input_dimension:
        os << "in[" << map.input_dimension() << "]";
        break;
      case OutputIndexMethod::array: {
        const auto& index_array_data = map.index_array_data();
        // A zero byte stride marks a dimension the index array does not
        // vary over; print such dimensions with extent 1.
        for (DimensionIndex input_dim = 0; input_dim < input_rank;
             ++input_dim) {
          index_array_shape[input_dim] =
              index_array_data.byte_strides[input_dim] == 0
                  ? 1
                  : input_domain.shape()[input_dim];
        }
        // Offset the element pointer by the inner product of the input
        // origin and the byte strides so the array view is zero-origin.
        ArrayView<const Index, dynamic_rank> index_array(
            AddByteOffset(
                ElementPointer<const Index>(index_array_data.element_pointer),
                IndexInnerProduct(input_rank, input_domain.origin().data(),
                                  index_array_data.byte_strides)),
            StridedLayoutView<>(input_rank, &index_array_shape[0],
                                index_array_data.byte_strides));
        os << "bounded(" << index_array_data.index_range
           << ", array(in)), where array =\n";
        os << "      " << index_array;
        break;
      }
    }
    os << '\n';
  }
}
void PrintDomainToOstream(std::ostream& os, TransformRep* transform) {
if (!transform) {
os << "<invalid index domain>";
return;
}
os << "{ ";
for (DimensionIndex i = 0, rank = transform->input_rank; i < rank; ++i) {
if (i != 0) os << ", ";
const InputDimensionRef dim_ref = transform->input_dimension(i);
const IndexDomainDimension<view> d{dim_ref.optionally_implicit_domain(),
dim_ref.label()};
os << d;
}
os << " }";
}
// Evaluates this output index map at `input_indices`.
//
// Returns `base * stride() + offset()`, where `base` is:
//   - 0 for a constant map;
//   - `input_indices[input_dimension()]` for a single-input-dimension map;
//   - the index array element addressed by `input_indices` for an array
//     map, validated against the map's `index_range`.
Result<Index> OutputIndexMap::operator()(
    span<const Index> input_indices) const {
  Index base_output_index;
  switch (method()) {
    case OutputIndexMethod::constant:
      base_output_index = 0;
      break;
    case OutputIndexMethod::single_input_dimension: {
      const DimensionIndex input_dim = input_dimension();
      assert(input_dim >= 0 && input_dim < input_indices.size());
      base_output_index = input_indices[input_dim];
      break;
    }
    case OutputIndexMethod::array: {
      const IndexArrayData& data = index_array_data();
      assert(data.element_pointer &&
             input_indices.size() <= data.rank_capacity);
      // Load the array element at the byte offset given by the inner
      // product of the input indices and the stored byte strides.
      base_output_index =
          data.element_pointer.byte_strided_pointer()[IndexInnerProduct(
              input_indices.size(), input_indices.data(), data.byte_strides)];
      TENSORSTORE_RETURN_IF_ERROR(
          CheckContains(data.index_range, base_output_index),
          MaybeAnnotateStatus(
              _, "Checking result of index array output index map"));
      break;
    }
  }
  return base_output_index * stride() + offset();
}
// Maps `input_indices` through `data`, writing the results to
// `output_indices`.
//
// Every input index is first validated against the corresponding
// dimension's effective interval; only then is each output index map
// evaluated.  Returns kOutOfRange for an out-of-bounds input index, or the
// (annotated) error from an output index map evaluation.
absl::Status TransformIndices(TransformRep* data,
                              span<const Index> input_indices,
                              span<Index> output_indices) {
  assert(data && data->input_rank == input_indices.size() &&
         data->output_rank == output_indices.size());
  const DimensionIndex output_rank = data->output_rank;
  const DimensionIndex input_rank = data->input_rank;
  span<const OutputIndexMap> output_index_maps =
      data->output_index_maps().first(output_rank);
  // Validate all input indices before computing any outputs.
  for (DimensionIndex i = 0; i < input_rank; ++i) {
    auto oi_interval = data->input_dimension(i).optionally_implicit_domain();
    if (!Contains(oi_interval.effective_interval(), input_indices[i])) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Index ", input_indices[i], " is not contained in the domain ",
          oi_interval, " for input dimension ", i));
    }
  }
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        output_indices[output_dim],
        output_index_maps[output_dim](input_indices),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Computing index for output dimension ",
                                   output_dim)));
  }
  return absl::OkStatus();
}
// Folds the single value `index` of a rank-0 index array map into an
// equivalent constant map.
//
// On success, `*output_offset += index * *output_stride` and
// `*output_stride` becomes 0.  Returns an error if `index` is outside
// `bounds`, or kInvalidArgument if the offset computation overflows.
absl::Status ReplaceZeroRankIndexArrayIndexMap(Index index,
                                               IndexInterval bounds,
                                               Index* output_offset,
                                               Index* output_stride) {
  TENSORSTORE_RETURN_IF_ERROR(CheckContains(bounds, index));
  Index new_offset;
  // Checked arithmetic: index * stride, then + existing offset.
  if (internal::MulOverflow(index, *output_stride, &new_offset) ||
      internal::AddOverflow(new_offset, *output_offset, output_offset)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Integer overflow computing offset for output dimension."));
  }
  *output_stride = 0;
  return absl::OkStatus();
}
// Returns a new domain-only representation (output rank 0) containing the
// input dimensions of `rep` listed in `dims`, in the order given.
//
// `dims` must contain distinct, valid input dimension indices of `rep`;
// this is checked by assertions in debug builds only.
TransformRep::Ptr<> GetSubDomain(TransformRep* rep,
                                 span<const DimensionIndex> dims) {
  assert(rep);
  [[maybe_unused]] const DimensionIndex old_rank = rep->input_rank;
  const DimensionIndex new_rank = dims.size();
  auto new_rep = TransformRep::Allocate(new_rank, 0);
  new_rep->output_rank = 0;
  new_rep->input_rank = new_rank;
#ifndef NDEBUG
  // Tracks which original dimensions were already used, to catch duplicates.
  DimensionSet seen_dims;
#endif
  for (DimensionIndex new_dim = 0; new_dim < dims.size(); ++new_dim) {
    const DimensionIndex old_dim = dims[new_dim];
    assert(old_dim >= 0 && old_dim < old_rank);
#ifndef NDEBUG
    assert(!seen_dims[old_dim]);
    seen_dims[old_dim] = true;
#endif
    new_rep->input_dimension(new_dim) = rep->input_dimension(old_dim);
  }
  return new_rep;
}
// Returns true if every label in `labels` is the empty string.
bool IsUnlabeled(span<const std::string> labels) {
  for (std::string_view label : labels) {
    if (!label.empty()) return false;
  }
  return true;
}
// Returns the set of input dimensions on which any index-array output map
// of `transform` depends, i.e. those with a non-zero byte stride in some
// index array.
DimensionSet GetIndexArrayInputDimensions(TransformRep* transform) {
  DimensionSet set;
  const DimensionIndex input_rank = transform->input_rank;
  const DimensionIndex output_rank = transform->output_rank;
  auto maps = transform->output_index_maps();
  for (DimensionIndex output_dim = 0; output_dim < output_rank;
       ++output_dim) {
    const auto& map = maps[output_dim];
    if (map.method() != OutputIndexMethod::array) continue;
    const auto& index_array_data = map.index_array_data();
    for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
      if (index_array_data.byte_strides[input_dim] != 0) {
        set[input_dim] = true;
      }
    }
  }
  return set;
}
// Returns `transform` with its implicit lower/upper bound bits replaced.
//
// The representation is first made safely mutable via `MutableRep`.  Unless
// `domain_only`, any input dimension used by an index array output map is
// forced to remain explicit (its implicit bits are cleared).  In all cases
// bits at or beyond `input_rank` are masked off.
TransformRep::Ptr<> WithImplicitDimensions(TransformRep::Ptr<> transform,
                                           DimensionSet implicit_lower_bounds,
                                           DimensionSet implicit_upper_bounds,
                                           bool domain_only) {
  transform = MutableRep(std::move(transform), domain_only);
  if (!domain_only && (implicit_lower_bounds || implicit_upper_bounds)) {
    // Dimensions indexed by an index array must keep explicit bounds.
    auto index_array_dims =
        internal_index_space::GetIndexArrayInputDimensions(transform.get());
    implicit_lower_bounds &= ~index_array_dims;
    implicit_upper_bounds &= ~index_array_dims;
  }
  const auto mask = DimensionSet::UpTo(transform->input_rank);
  transform->implicit_lower_bounds = implicit_lower_bounds & mask;
  transform->implicit_upper_bounds = implicit_upper_bounds & mask;
  return transform;
}
#ifndef NDEBUG
// Debug-build-only validation of `rep`'s internal invariants: rank vs.
// capacity relationships, implicit-bound bit masks, unique dimension
// labels, representable input intervals, and the per-method constraints on
// each output index map.
void DebugCheckInvariants(TransformRep* rep) {
  assert(rep);
  assert(rep->reference_count > 0);
  const DimensionIndex input_rank = rep->input_rank,
                       output_rank = rep->output_rank;
  assert(rep->input_rank_capacity <= kMaxRank);
  assert(rep->output_rank_capacity <= kMaxRank);
  assert(input_rank <= rep->input_rank_capacity);
  assert(output_rank <= rep->output_rank_capacity);
  assert(input_rank >= 0);
  assert(output_rank >= 0);
  // Implicit-bound bits must not extend beyond `input_rank`.
  const auto mask = DimensionSet::UpTo(rep->input_rank);
  assert((rep->implicit_lower_bounds & mask) == rep->implicit_lower_bounds);
  assert((rep->implicit_upper_bounds & mask) == rep->implicit_upper_bounds);
  TENSORSTORE_CHECK_OK(internal::ValidateDimensionLabelsAreUnique(
      rep->input_labels().first(input_rank)));
  auto input_origin = rep->input_origin().data();
  auto input_shape = rep->input_shape().data();
  // Every input dimension must have a representable sized interval.
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    TENSORSTORE_CHECK_OK(
        IndexInterval::Sized(input_origin[input_dim], input_shape[input_dim]));
  }
  const bool is_domain_explicitly_empty = IsDomainExplicitlyEmpty(rep);
  const auto implicit_dims = rep->implicit_dimensions();
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = rep->output_index_maps()[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::constant: {
        // Constant maps always store a zero stride.
        assert(map.stride() == 0);
        break;
      }
      case OutputIndexMethod::single_input_dimension: {
        const DimensionIndex input_dim = map.input_dimension();
        assert(input_dim >= 0 && input_dim < input_rank);
        assert(map.stride() != 0);
        break;
      }
      case OutputIndexMethod::array: {
        assert(map.stride() != 0);
        const auto& index_array_data = map.index_array_data();
        assert(index_array_data.rank_capacity >= input_rank);
        assert(index_array_data.rank_capacity <= kMaxRank);
        // An explicitly-empty domain must not retain index array maps.
        assert(!is_domain_explicitly_empty);
        for (DimensionIndex input_dim = 0; input_dim < input_rank;
             ++input_dim) {
          const Index byte_stride = index_array_data.byte_strides[input_dim];
          if (byte_stride == 0) continue;
          // Dimensions an index array varies over must be finite and
          // explicit.
          const auto bounds = IndexInterval::UncheckedSized(
              input_origin[input_dim], input_shape[input_dim]);
          assert(IsFinite(bounds));
          assert(!implicit_dims[input_dim]);
        }
        break;
      }
    }
  }
  // Unused output map slots must not hold index arrays.
  for (DimensionIndex output_dim = output_rank,
                      output_rank_capacity = rep->output_rank_capacity;
       output_dim < output_rank_capacity; ++output_dim) {
    assert(rep->output_index_maps()[output_dim].method() !=
           OutputIndexMethod::array);
  }
}
#endif
}
} | #include "tensorstore/index_space/internal/transform_rep.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/macros.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
#if ABSL_HAVE_EXCEPTIONS
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_THROW(expr, std::bad_alloc);
#else
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_DEATH(expr, "Out of memory");
#endif
using ::tensorstore::Box;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::internal_index_space::CopyTransformRep;
using ::tensorstore::internal_index_space::MoveTransformRep;
using ::tensorstore::internal_index_space::MutableRep;
using ::tensorstore::internal_index_space::NewOrMutableRep;
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::ReplaceZeroRankIndexArrayIndexMap;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::internal_index_space::TransformRep;
using ::tensorstore::internal_index_space::ValidateAndIntersectBounds;
using ::tensorstore::internal_testing::TestConcurrent;
// Exercises the OutputIndexMap method-state transitions (constant <->
// single_input_dimension <-> array), including that growing an index
// array's rank capacity preserves previously stored array data.
TEST(OutputIndexMapTest, Basic) {
  OutputIndexMap map;
  // Default-constructed maps are constant maps.
  EXPECT_EQ(OutputIndexMethod::constant, map.method());
  map.SetSingleInputDimension(2);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(2, map.input_dimension());
  map.SetSingleInputDimension(3);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(3, map.input_dimension());
  map.SetConstant();
  EXPECT_EQ(OutputIndexMethod::constant, map.method());
  {
    auto& index_array_data = map.SetArrayIndexing(3);
    EXPECT_EQ(OutputIndexMethod::array, map.method());
    EXPECT_EQ(3, index_array_data.rank_capacity);
    EXPECT_EQ(IndexInterval(), index_array_data.index_range);
    EXPECT_EQ(nullptr, index_array_data.element_pointer);
    // Requesting a smaller rank reuses the existing allocation.
    EXPECT_EQ(&index_array_data, &map.SetArrayIndexing(1));
    EXPECT_EQ(3, index_array_data.rank_capacity);
    auto ptr = std::make_shared<Index>();
    index_array_data.element_pointer = ptr;
    index_array_data.index_range = IndexInterval::UncheckedClosed(1, 10);
    index_array_data.byte_strides[0] = 1;
    index_array_data.byte_strides[1] = 2;
    index_array_data.byte_strides[2] = 3;
    // Growing the rank capacity must preserve the stored fields.
    auto& new_index_array_data = map.SetArrayIndexing(4);
    EXPECT_EQ(4, new_index_array_data.rank_capacity);
    EXPECT_EQ(ptr, new_index_array_data.element_pointer.pointer());
    EXPECT_EQ(IndexInterval::UncheckedClosed(1, 10),
              new_index_array_data.index_range);
    EXPECT_EQ(1, new_index_array_data.byte_strides[0]);
    EXPECT_EQ(2, new_index_array_data.byte_strides[1]);
    EXPECT_EQ(3, new_index_array_data.byte_strides[2]);
  }
  map.SetSingleInputDimension(3);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(3, map.input_dimension());
  {
    auto& index_array_data = map.SetArrayIndexing(3);
    EXPECT_EQ(OutputIndexMethod::array, map.method());
    EXPECT_EQ(3, index_array_data.rank_capacity);
  }
}
// An impossibly large index array rank triggers the out-of-memory path,
// both on a fresh map and on one that already holds an array allocation.
TEST(OutputIndexMapDeathTest, Basic) {
  OutputIndexMap map;
  TENSORSTORE_EXPECT_OOM(
      map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
  map.SetArrayIndexing(5);
  TENSORSTORE_EXPECT_OOM(
      map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
}
// In-bounds case: the constant index is folded into the offset
// (5 + 10 * 3) and the stride becomes 0.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, Basic) {
  Index output_offset = 5, output_stride = 3;
  EXPECT_EQ(absl::OkStatus(), ReplaceZeroRankIndexArrayIndexMap(
                                  10, IndexInterval::UncheckedClosed(3, 15),
                                  &output_offset, &output_stride));
  EXPECT_EQ(5 + 10 * 3, output_offset);
  EXPECT_EQ(0, output_stride);
}
// An index outside `bounds` is rejected with kOutOfRange.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OutOfBounds) {
  Index output_offset = 5, output_stride = 3;
  EXPECT_THAT(ReplaceZeroRankIndexArrayIndexMap(
                  10, IndexInterval::UncheckedClosed(11, 15), &output_offset,
                  &output_stride),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            "Index 10 is outside valid range \\[11, 16\\)"));
}
// Overflow while adding the folded value to the existing offset.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowOffset) {
  Index output_offset = std::numeric_limits<Index>::max(), output_stride = 3;
  EXPECT_THAT(
      ReplaceZeroRankIndexArrayIndexMap(10,
                                        IndexInterval::UncheckedClosed(5, 15),
                                        &output_offset, &output_stride),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          ".*Integer overflow computing offset for output dimension.*"));
}
// Overflow while multiplying the index by the stride.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowStride) {
  Index output_offset = 5, output_stride = 100;
  EXPECT_THAT(
      ReplaceZeroRankIndexArrayIndexMap(kMaxFiniteIndex, IndexInterval(),
                                        &output_offset, &output_stride),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          ".*Integer overflow computing offset for output dimension.*"));
}
// A freshly allocated representation records the requested capacities,
// initializes all output maps to constant, and leaves labels empty.
TEST(Allocate, Basic) {
  auto ptr = TransformRep::Allocate(3, 2);
  EXPECT_EQ(3, ptr->input_rank_capacity);
  EXPECT_EQ(2, ptr->output_rank_capacity);
  EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[0].method());
  EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[1].method());
  EXPECT_TRUE(ptr->input_labels()[0].empty());
  EXPECT_TRUE(ptr->input_labels()[1].empty());
  EXPECT_TRUE(ptr->input_labels()[2].empty());
}
// CopyTransformRep copies all fields to `dest` and shares (rather than
// clones) the index array element data; the source is left unchanged.
TEST(CopyTransformRep, Basic) {
  auto source = TransformRep::Allocate(1, 2);
  source->input_rank = 1;
  source->output_rank = 2;
  source->input_origin()[0] = 5;
  source->input_shape()[0] = 2;
  auto& source_map = source->output_index_maps()[0];
  source_map.offset() = 3;
  source_map.stride() = 4;
  auto index_array_ptr = std::make_shared<Index>();
  auto& source_index_array_data = source_map.SetArrayIndexing(1);
  source_index_array_data.element_pointer = index_array_ptr;
  source_index_array_data.byte_strides[0] = 0;
  source->input_labels()[0] = "source";
  auto dest = TransformRep::Allocate(1, 2);
  dest->input_rank = 0;
  dest->output_rank = 0;
  dest->input_origin()[0] = 6;
  dest->input_shape()[0] = 7;
  dest->input_labels()[0] = "dest";
  auto& dest_map = dest->output_index_maps()[0];
  dest_map.offset() = 10;
  dest_map.stride() = 11;
  CopyTransformRep(source.get(), dest.get());
  // The source must be unmodified by the copy.
  EXPECT_EQ(5, source->input_origin()[0]);
  EXPECT_EQ(2, source->input_shape()[0]);
  EXPECT_EQ(3, source_map.offset());
  EXPECT_EQ(4, source_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, source_map.method());
  EXPECT_EQ(&source_index_array_data, &source_map.index_array_data());
  EXPECT_EQ(index_array_ptr, source_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, source_index_array_data.byte_strides[0]);
  EXPECT_EQ("source", source->input_labels()[0]);
  // The destination now mirrors the source.
  EXPECT_EQ(1, dest->input_rank);
  EXPECT_EQ(2, dest->output_rank);
  EXPECT_EQ(5, dest->input_origin()[0]);
  EXPECT_EQ(2, dest->input_shape()[0]);
  EXPECT_EQ(3, dest_map.offset());
  EXPECT_EQ(4, dest_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
  auto& dest_index_array_data = dest_map.index_array_data();
  EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
  // Owners: this test's local pointer, the source map, and the dest map.
  EXPECT_EQ(3, index_array_ptr.use_count());
  EXPECT_EQ("source", dest->input_labels()[0]);
}
// MoveTransformRep transfers the index array data to `dest` (leaving the
// source's array map reset) while copying the trivial fields.
TEST(MoveTransformRep, Basic) {
  using ::tensorstore::DimensionSet;
  auto source = TransformRep::Allocate(1, 2);
  source->input_rank = 1;
  source->output_rank = 2;
  source->implicit_lower_bounds = DimensionSet::UpTo(source->input_rank);
  source->implicit_upper_bounds = DimensionSet::UpTo(source->input_rank);
  source->input_origin()[0] = 5;
  source->input_shape()[0] = 2;
  auto& source_map = source->output_index_maps()[0];
  source_map.SetSingleInputDimension(0);
  source_map.offset() = 3;
  source_map.stride() = 4;
  auto index_array_ptr = std::make_shared<Index>();
  auto& source_index_array_data = source_map.SetArrayIndexing(1);
  source_index_array_data.element_pointer = index_array_ptr;
  source_index_array_data.byte_strides[0] = 0;
  source->input_labels()[0] = "source";
  auto dest = TransformRep::Allocate(1, 2);
  dest->input_rank = 0;
  dest->output_rank = 0;
  dest->input_origin()[0] = 6;
  dest->input_shape()[0] = 7;
  dest->input_labels()[0] = "dest";
  auto& dest_map = dest->output_index_maps()[0];
  dest_map.offset() = 10;
  dest_map.stride() = 11;
  MoveTransformRep(source.get(), dest.get());
  EXPECT_EQ(5, source->input_origin()[0]);
  EXPECT_EQ(2, source->input_shape()[0]);
  EXPECT_EQ(3, source_map.offset());
  EXPECT_EQ(4, source_map.stride());
  // The source's array map was moved out, leaving a constant map.
  EXPECT_EQ(OutputIndexMethod::constant, source_map.method());
  EXPECT_EQ(1, dest->input_rank);
  EXPECT_EQ(2, dest->output_rank);
  EXPECT_EQ(5, dest->input_origin()[0]);
  EXPECT_EQ(2, dest->input_shape()[0]);
  EXPECT_EQ(3, dest_map.offset());
  EXPECT_EQ(4, dest_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
  auto& dest_index_array_data = dest_map.index_array_data();
  // The destination took over the very same IndexArrayData object.
  EXPECT_EQ(&dest_index_array_data, &source_index_array_data);
  EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
  // Owners: this test's local pointer and the dest map only.
  EXPECT_EQ(2, index_array_ptr.use_count());
  EXPECT_EQ("source", dest->input_labels()[0]);
}
// Builds a 3-d test transform exercising all three output index map kinds
// (constant, single_input_dimension, and array) with implicit bounds.
tensorstore::IndexTransform<> MakeTestTransform() {
  return IndexTransformBuilder<>(3, 3)
      .input_origin({1, 2, 3})
      .input_shape({2, 3, 4})
      .input_labels({"a", "b", "c"})
      .implicit_lower_bounds({0, 1, 0})
      .implicit_upper_bounds({0, 1, 1})
      .output_constant(2, 5)
      .output_single_input_dimension(1, 5, 7, 2)
      .output_index_array(0, 8, 11,
                          tensorstore::MakeArray<Index>({{{8}}, {{9}}}),
                          tensorstore::IndexInterval::Sized(7, 3))
      .Finalize()
      .value();
}
// MutableRep returns an equivalent copy when the representation is shared,
// and the same representation when it is uniquely owned.
TEST(MutableRepTest, Basic) {
  auto transform = MakeTestTransform();
  EXPECT_TRUE(TransformAccess::rep(transform)->is_unique());
  auto rep1 = TransformAccess::rep_ptr<tensorstore::container>(transform);
  EXPECT_FALSE(TransformAccess::rep(transform)->is_unique());
  // Shared: must copy.
  auto rep2 = MutableRep(std::move(rep1));
  EXPECT_NE(TransformAccess::rep(transform), rep2.get());
  EXPECT_EQ(transform, TransformAccess::Make<tensorstore::IndexTransformView<>>(
                           rep2.get()));
  EXPECT_TRUE(rep2->is_unique());
  // Unique: returned unchanged.
  TransformRep* rep2_ptr = rep2.get();
  auto rep3 = MutableRep(std::move(rep2));
  EXPECT_EQ(rep2_ptr, rep3.get());
}
// Stress-tests MutableRep against concurrent reference-count changes: one
// lambda copies-or-reuses via MutableRep while another drops its reference.
TEST(MutableRepTest, Concurrent) {
  auto orig = IndexTransformBuilder<>(1, 1)
                  .input_origin({1})
                  .input_shape({2})
                  .input_labels({"a"})
                  .implicit_lower_bounds({0})
                  .implicit_upper_bounds({0})
                  .output_constant(0, 5)
                  .Finalize()
                  .value();
  TransformRep* orig_ptr;
  TransformRep::Ptr<> write_ptr = TransformAccess::rep_ptr(orig);
  write_ptr->output_rank = 0;
  TransformRep::Ptr<> read_ptr;
  [[maybe_unused]] size_t num_reads_before_write = 0;
  const size_t num_iterations = 1000;
  TestConcurrent(
      num_iterations,
      [&] {
        write_ptr->input_rank = 1;
        orig_ptr = write_ptr.get();
        read_ptr = write_ptr;
      },
      [&] { EXPECT_EQ(0, write_ptr->input_rank); },
      [&] {
        write_ptr = MutableRep(std::move(write_ptr));
        if (orig_ptr == write_ptr.get()) {
          ++num_reads_before_write;
        }
        write_ptr->input_rank = 0;
      },
      [&] {
        EXPECT_EQ(1, read_ptr->input_rank);
        read_ptr.reset();
      });
#if 0
  EXPECT_LT(0, num_reads_before_write);
  EXPECT_LT(num_reads_before_write, num_iterations);
#endif
}
// NewOrMutableRep reuses the representation only when it is uniquely owned
// and has sufficient input/output rank capacity; otherwise it allocates a
// fresh representation with exactly the requested capacities.
TEST(NewOrMutableRepTest, Basic) {
  auto transform = MakeTestTransform();
  {
    // Unique owner, exactly sufficient capacity: reused.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
    EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
  }
  {
    // Unique owner, smaller requested capacity: still reused.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 2, 2);
    EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
  }
  {
    // Shared (transform_copy holds a second reference): must allocate.
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(3, mutable_rep->output_rank_capacity);
  }
  {
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 1, 2);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(1, mutable_rep->input_rank_capacity);
    EXPECT_EQ(2, mutable_rep->output_rank_capacity);
  }
  {
    // Unique but insufficient output capacity: must allocate.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(4, mutable_rep->output_rank_capacity);
  }
  {
    // Unique but insufficient input capacity: must allocate.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 4, 3);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(4, mutable_rep->input_rank_capacity);
    EXPECT_EQ(3, mutable_rep->output_rank_capacity);
  }
  {
    // Shared and insufficient capacity: must allocate.
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(4, mutable_rep->output_rank_capacity);
  }
}
// Bounds accepted by the predicate validate successfully, and the
// intersection of `inner` and `combined` is written back to `combined`.
TEST(ValidateAndIntersectBoundsTest, Success) {
  const Box<> inner({-kInfIndex, 6}, {kInfIndex + 8, 3});
  Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
  auto status = ValidateAndIntersectBounds(
      inner, combined, [](IndexInterval outer, IndexInterval inner) {
        return ContainsOrUnbounded(outer, inner);
      });
  TENSORSTORE_CHECK_OK(status);
  EXPECT_EQ(Box<>({1, 6}, {7, 3}), combined);
}
// Bounds rejected by the predicate produce kOutOfRange with a diagnostic
// naming the offending dimension.
TEST(ValidateAndIntersectBoundsTest, Failure) {
  const Box<> inner({-kInfIndex, 4}, {kInfIndex + 8, 3});
  Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
  auto status = ValidateAndIntersectBounds(
      inner, combined, [](IndexInterval outer, IndexInterval inner) {
        return ContainsOrUnbounded(outer, inner);
      });
  EXPECT_THAT(
      status,
      MatchesStatus(
          absl::StatusCode::kOutOfRange,
          ".*Propagated bounds are incompatible with existing bounds in "
          "dimension 1 bounds .* vs. propagated bounds.*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transform_rep.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_rep_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
866239b7-d9d3-4781-9af5-36f32ea4978f | cpp | google/tensorstore | diagonal_op | tensorstore/index_space/internal/diagonal_op.cc | tensorstore/index_space/diagonal_op_test.cc | #include "tensorstore/index_space/internal/diagonal_op.h"
#include <algorithm>
namespace tensorstore {
namespace internal_index_space {
namespace {
template <typename R>
void ShiftRangeForwardByOne(R range) {
for (DimensionIndex i = range.size() - 1; i > 0; --i) {
range[i] = range[i - 1];
}
}
// Copies `original` to `result` (which may be the same object), replacing
// the input dimensions listed in `*dimensions` by a single diagonal
// dimension that becomes input dimension 0 of `result`.
//
// The diagonal's domain is the intersection of the selected dimensions'
// domains; a diagonal bound is implicit only if it is implicit in every
// selected dimension.  On return, `*dimensions` contains just `{0}`.
void ExtractDiagonal(TransformRep* original, TransformRep* result,
                     DimensionIndexBuffer* dimensions, bool domain_only) {
  const DimensionIndex orig_input_rank = original->input_rank;
  const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
  const DimensionIndex new_input_rank =
      orig_input_rank - dimensions->size() + 1;
  assert(result->input_rank_capacity >= new_input_rank);
  // The diagonal always becomes input dimension 0 of the result.
  const DimensionIndex diag_input_dim = 0;
  // Maps each original input dimension to its new input dimension; -1 marks
  // "not yet assigned".
  DimensionIndex orig_to_new_input_dim[kMaxRank];
  std::fill_n(&orig_to_new_input_dim[0], orig_input_rank,
              static_cast<DimensionIndex>(-1));
  bool lower_diagonal_bound_implicit = true,
       upper_diagonal_bound_implicit = true;
  IndexInterval diagonal_bounds;
  // Compute the diagonal's bounds and implicit flags from the selected
  // dimensions; each selected dimension maps to the diagonal.
  for (DimensionIndex orig_input_dim : *dimensions) {
    orig_to_new_input_dim[orig_input_dim] = diag_input_dim;
    const auto d = original->input_dimension(orig_input_dim);
    diagonal_bounds = Intersect(diagonal_bounds, d.domain());
    if (!d.implicit_lower_bound()) {
      lower_diagonal_bound_implicit = false;
    }
    if (!d.implicit_upper_bound()) {
      upper_diagonal_bound_implicit = false;
    }
  }
  // Assign new positions, starting at 1, to the unselected dimensions,
  // preserving their relative order.
  for (DimensionIndex orig_input_dim = 0, new_input_dim = 1;
       orig_input_dim < orig_input_rank; ++orig_input_dim) {
    if (orig_to_new_input_dim[orig_input_dim] == -1) {
      orig_to_new_input_dim[orig_input_dim] = new_input_dim++;
    }
  }
  const bool domain_is_explicitly_empty = !lower_diagonal_bound_implicit &&
                                          !upper_diagonal_bound_implicit &&
                                          diagonal_bounds.empty();
  span<const OutputIndexMap> orig_maps =
      original->output_index_maps().first(output_rank);
  span<OutputIndexMap> result_maps =
      result->output_index_maps().first(output_rank);
  // Rewrite each output index map in terms of the new input dimensions.
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& orig_map = orig_maps[output_dim];
    auto& result_map = result_maps[output_dim];
    result_map.stride() = orig_map.stride();
    result_map.offset() = orig_map.offset();
    switch (orig_map.method()) {
      case OutputIndexMethod::constant:
        result_map.SetConstant();
        break;
      case OutputIndexMethod::single_input_dimension: {
        const DimensionIndex orig_input_dim = orig_map.input_dimension();
        assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
        const DimensionIndex new_input_dim =
            orig_to_new_input_dim[orig_input_dim];
        result_map.SetSingleInputDimension(new_input_dim);
        break;
      }
      case OutputIndexMethod::array: {
        if (domain_is_explicitly_empty) {
          // An explicitly-empty domain cannot retain index array maps;
          // replace with an all-zero constant map.
          result_map.SetConstant();
          result_map.stride() = 0;
          result_map.offset() = 0;
          break;
        }
        auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
        const auto& orig_index_array = orig_map.index_array_data();
        assert(orig_index_array.rank_capacity >= orig_input_rank);
        // Along the diagonal the selected dimensions advance together, so
        // its byte stride is the sum of their byte strides.
        Index diag_byte_stride = 0;
        for (DimensionIndex orig_input_dim : *dimensions) {
          diag_byte_stride += orig_index_array.byte_strides[orig_input_dim];
        }
        // Fill strides at positions shifted down by one (safe even when
        // `result` aliases `original`, since new_input_dim - 1 <=
        // orig_input_dim), then shift them up and write the diagonal's
        // stride at index 0.
        for (DimensionIndex orig_input_dim = 0;
             orig_input_dim < orig_input_rank; ++orig_input_dim) {
          const DimensionIndex new_input_dim =
              orig_to_new_input_dim[orig_input_dim];
          if (new_input_dim == diag_input_dim) continue;
          assert(new_input_dim - 1 <= orig_input_dim);
          result_index_array.byte_strides[new_input_dim - 1] =
              orig_index_array.byte_strides[orig_input_dim];
        }
        ShiftRangeForwardByOne(
            span(result_index_array.byte_strides, new_input_rank));
        result_index_array.byte_strides[diag_input_dim] = diag_byte_stride;
        result_index_array.index_range = orig_index_array.index_range;
        result_index_array.element_pointer =
            orig_index_array.element_pointer.pointer();
        break;
      }
    }
  }
  // Copy the unselected input dimensions into their shifted-by-one
  // positions, then shift forward so the diagonal can occupy dimension 0.
  for (DimensionIndex orig_input_dim = 0; orig_input_dim < orig_input_rank;
       ++orig_input_dim) {
    const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
    if (new_input_dim == diag_input_dim) continue;
    assert(new_input_dim - 1 <= orig_input_dim);
    result->input_dimension(new_input_dim - 1) =
        original->input_dimension(orig_input_dim);
  }
  ShiftRangeForwardByOne(result->all_input_dimensions(new_input_rank));
  // Initialize the diagonal dimension itself; it is always unlabeled.
  {
    const auto d = result->input_dimension(diag_input_dim);
    d.domain() = diagonal_bounds;
    d.implicit_lower_bound() = lower_diagonal_bound_implicit;
    d.implicit_upper_bound() = upper_diagonal_bound_implicit;
    d.SetEmptyLabel();
  }
  result->input_rank = new_input_rank;
  result->output_rank = output_rank;
  dimensions->clear();
  dimensions->push_back(diag_input_dim);
  NormalizeImplicitBounds(*result);
}
}
// Implements the `Diagonal` dimension expression: merges the dimensions in
// `*dimensions` into a single diagonal dimension, which becomes input
// dimension 0 of the result.  On return `*dimensions` contains `{0}` (set
// by `ExtractDiagonal`).
Result<IndexTransform<>> ApplyDiagonal(IndexTransform<> transform,
                                       DimensionIndexBuffer* dimensions,
                                       bool domain_only) {
  TransformRep* rep = TransformAccess::rep(transform);
  const DimensionIndex new_input_rank =
      rep->input_rank - dimensions->size() + 1;
  // Reuse `rep` in place when possible; otherwise allocate a new one.
  auto new_rep =
      NewOrMutableRep(rep, new_input_rank, rep->output_rank, domain_only);
  ExtractDiagonal(rep, new_rep.get(), dimensions, domain_only);
  internal_index_space::DebugCheckInvariants(new_rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
// Verifies the documented example: taking the diagonal of dimensions "x" and
// "z" of a rank-3 identity transform yields a rank-2 transform whose first
// (unlabeled) dimension feeds both output dimensions 0 and 2, with domain
// equal to the intersection [3, 5] of the two original domains.
TEST(DiagonalTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({5, 4, 5})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform = IndexTransformBuilder<2, 3>()
                                          .input_origin({3, 2})
                                          .input_shape({3, 4})
                                          .input_labels({"", "y"})
                                          .output_single_input_dimension(0, 0)
                                          .output_single_input_dimension(1, 1)
                                          .output_single_input_dimension(2, 0)
                                          .Finalize()
                                          .value();
  // The diagonal index 4 maps back to positions (4, 3, 4) in the original.
  const EquivalentIndices equivalent_indices = {
      {{4, 3, 4}, {4, 3}},
  };
  TestDimExpression(original_transform,
                    Dims(0, 2).Diagonal(),
                    {0},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  // Selecting the same dimensions by label must be equivalent.
  TestDimExpression(original_transform,
                    Dims("x", "z").Diagonal(),
                    {0},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
TEST(DiagonalTest, ZeroDimensional) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 4})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
Dims().Diagonal(),
{0},
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
{{{3, 4}, {8, 3, 4}}},
false);
}
TEST(DiagonalTest, OneDimensional) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({5, 4, 5})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
Dims(1).Diagonal(),
{0},
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
{{{4, 3, 5}, {3, 4, 5}}});
}
TEST(DiagonalTest, TwoDimensionalSimple) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitLower) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_lower_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitUpper) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_upper_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
TEST(DiagonalTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_shape({0, 2})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(1, 0, 1, MakeArray<Index>({{1, 2}}))
.Finalize()
.value(),
Dims(0, 1).Diagonal(),
{0},
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
TEST(DiagonalTest, Labeled) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.input_labels({"a", "b", "c"})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal().Label("diag"),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/diagonal_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/diagonal_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
58fd0fed-e6ea-4f5d-b2d5-9570483b8c99 | cpp | google/tensorstore | single_index_slice_op | tensorstore/index_space/internal/single_index_slice_op.cc | tensorstore/index_space/single_index_slice_op_test.cc | #include "tensorstore/index_space/internal/single_index_slice_op.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Per-input-dimension bookkeeping for a single-index slice operation.
struct InputDimensionSingletonSliceInfo {
  // Corresponding input dimension index in the new (sliced) transform, or
  // `-1` if this dimension is being sliced away.
  DimensionIndex new_input_dim;
  // If `new_input_dim == -1`, the index at which this dimension is sliced.
  // Otherwise unused.
  Index offset;
};
// Aggregate slicing plan computed by `GetSingletonSlicingInfo` and consumed
// by `PerformSingleIndexSlice`.
struct SingletonSlicingInfo {
  explicit SingletonSlicingInfo(DimensionIndex original_input_rank,
                                DimensionIndex new_input_rank)
      : original_input_rank(original_input_rank),
        new_input_rank(new_input_rank) {
    // Value-initialize the per-dimension table for the dimensions in use;
    // entries for sliced dimensions are overwritten later with {-1, index}.
    std::fill_n(&original_input_dimension_info[0], original_input_rank,
                InputDimensionSingletonSliceInfo{0, 0});
  }
  DimensionIndex original_input_rank;
  DimensionIndex new_input_rank;
  // Indexed by original input dimension; only the first
  // `original_input_rank` entries are meaningful.
  InputDimensionSingletonSliceInfo original_input_dimension_info[kMaxRank];
};
// Validates the slice request and computes the slicing plan.
//
// Checks that `indices` has one index per selected dimension and that each
// index lies within the (effective) domain of its dimension.  On success,
// fills in the old->new dimension mapping and clears `*dimensions_buffer`
// (the sliced dimensions no longer exist in the new transform).
//
// \returns The plan, or `absl::OutOfRangeError` if any index is out of
//     bounds (all offending dimensions are reported in one message).
Result<SingletonSlicingInfo> GetSingletonSlicingInfo(
    TransformRep* original, DimensionIndexBuffer* dimensions_buffer,
    IndexVectorOrScalarView indices) {
  const span<const DimensionIndex> dimensions(*dimensions_buffer);
  const DimensionIndex num_dims = dimensions.size();
  const DimensionIndex original_input_rank = original->input_rank;
  const DimensionIndex new_input_rank = original_input_rank - num_dims;
  TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(indices, num_dims));
  // Construct the plan in place inside the Result, so error assignment below
  // can reuse the same object.
  Result<SingletonSlicingInfo> result(tensorstore::in_place,
                                      original_input_rank, new_input_rank);
  // A scalar index is treated as a stride-0 "vector": every selected
  // dimension reads the same single value.
  const Index* indices_pointer =
      indices.pointer ? indices.pointer : &indices.size_or_scalar;
  const Index indices_stride = indices.pointer ? 1 : 0;
  // Accumulate all bounds violations into one message rather than failing on
  // the first one.
  std::string slice_error;
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    const DimensionIndex original_input_dim = dimensions[i];
    const Index index = indices_pointer[i * indices_stride];
    const auto domain = original->input_dimension(original_input_dim)
                            .optionally_implicit_domain();
    if (!Contains(domain.effective_interval(), index)) {
      tensorstore::StrAppend(&slice_error, (slice_error.empty() ? "" : ", "),
                             "in input dimension ", original_input_dim,
                             " index ", index, " is outside valid domain ",
                             domain);
    }
    // Mark this dimension as sliced away, recording the slice index.
    result->original_input_dimension_info[original_input_dim] =
        InputDimensionSingletonSliceInfo{-1, index};
  }
  if (!slice_error.empty()) {
    result = absl::OutOfRangeError(
        tensorstore::StrCat("Slice mismatch: ", slice_error));
    return result;
  }
  // Assign consecutive new dimension indices to the retained dimensions,
  // preserving their original order.
  for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
       original_input_dim < original_input_rank; ++original_input_dim) {
    auto& new_dim =
        result->original_input_dimension_info[original_input_dim].new_input_dim;
    if (new_dim == -1) continue;
    new_dim = new_input_dim;
    ++new_input_dim;
  }
  // The sliced dimensions are gone; the dimension selection becomes empty.
  dimensions_buffer->clear();
  return result;
}
// Applies the slicing plan `info` to produce `new_transform` from
// `original_transform`.  May be called with
// `new_transform == original_transform` (in-place update).
//
// \param domain_only If `true`, the output rank of `new_transform` is set to
//     0 and no output index maps are computed.
// \returns `absl::OkStatus()` on success, or `absl::InvalidArgumentError` on
//     integer overflow while folding a slice offset into an output map.
absl::Status PerformSingleIndexSlice(TransformRep* original_transform,
                                     TransformRep* new_transform,
                                     const SingletonSlicingInfo& info,
                                     bool domain_only) {
  const DimensionIndex original_input_rank = original_transform->input_rank;
  const DimensionIndex new_input_rank = info.new_input_rank;
  span<const InputDimensionSingletonSliceInfo> original_input_dimension_info =
      info.original_input_dimension_info;
  // Copy the retained input dimensions, compacting them to the front, and
  // detect whether any retained dimension has an explicitly empty domain
  // (which forces index-array maps to degenerate to constants below).
  bool domain_is_explicitly_empty = false;
  for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
       original_input_dim < original_input_rank; ++original_input_dim) {
    if (original_input_dimension_info[original_input_dim].new_input_dim < 0)
      continue;
    const InputDimensionRef new_dim_ref =
        new_transform->input_dimension(new_input_dim);
    new_dim_ref = original_transform->input_dimension(original_input_dim);
    if (new_dim_ref.domain().empty() && !new_dim_ref.implicit_lower_bound() &&
        !new_dim_ref.implicit_upper_bound()) {
      domain_is_explicitly_empty = true;
    }
    ++new_input_dim;
  }
  const DimensionIndex output_rank =
      domain_only ? 0 : original_transform->output_rank;
  span<const OutputIndexMap> original_maps =
      original_transform->output_index_maps().first(output_rank);
  span<OutputIndexMap> new_maps =
      new_transform->output_index_maps().first(output_rank);
  // Rewrite each output index map to account for the removed dimensions.
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const OutputIndexMap& original_map = original_maps[output_dim];
    OutputIndexMap& new_map = new_maps[output_dim];
    switch (original_map.method()) {
      case OutputIndexMethod::constant: {
        // Constant maps are unaffected by slicing.  Note: offset() must be
        // read before SetConstant() in case new_map aliases original_map.
        new_map.offset() = original_map.offset();
        new_map.SetConstant();
        new_map.stride() = 0;
        break;
      }
      case OutputIndexMethod::single_input_dimension: {
        const DimensionIndex original_input_dim =
            original_map.input_dimension();
        assert(original_input_dim >= 0 &&
               original_input_dim < original_input_rank);
        const auto slice_info =
            original_input_dimension_info[original_input_dim];
        const Index output_stride = original_map.stride();
        const Index output_offset = original_map.offset();
        if (slice_info.new_input_dim == -1) {
          // The referenced dimension was sliced away: the map becomes a
          // constant `offset + stride * slice_index`, with overflow checks.
          Index new_offset;
          if (internal::MulOverflow(slice_info.offset, output_stride,
                                    &new_offset) ||
              internal::AddOverflow(new_offset, output_offset,
                                    &new_map.offset())) {
            return absl::InvalidArgumentError(tensorstore::StrCat(
                "Integer overflow computing offset for output dimension ",
                output_dim, "."));
          }
          new_map.SetConstant();
          new_map.stride() = 0;
        } else {
          // The referenced dimension survives; just renumber it.
          new_map.SetSingleInputDimension(slice_info.new_input_dim);
          new_map.stride() = output_stride;
          new_map.offset() = output_offset;
        }
        break;
      }
      case OutputIndexMethod::array: {
        if (domain_is_explicitly_empty) {
          // No positions exist, so the index array is never consulted;
          // replace it with a trivial constant map.
          new_map.SetConstant();
          new_map.offset() = 0;
          new_map.stride() = 0;
          break;
        }
        const IndexArrayData& original_index_array_data =
            original_map.index_array_data();
        IndexArrayData& new_index_array_data =
            new_map.SetArrayIndexing(new_input_rank);
        new_index_array_data.index_range =
            original_index_array_data.index_range;
        // Fold the sliced dimensions' contributions into a fixed byte offset
        // into the index array; retained dimensions keep their strides.
        Index array_byte_offset = 0;
        bool has_non_zero_byte_strides = false;
        for (DimensionIndex original_input_dim = 0;
             original_input_dim < original_input_rank; ++original_input_dim) {
          const auto slice_info =
              original_input_dimension_info[original_input_dim];
          const Index byte_stride =
              original_index_array_data.byte_strides[original_input_dim];
          if (slice_info.new_input_dim == -1) {
            // Byte offsets intentionally wrap on overflow; bounds are
            // validated elsewhere.
            array_byte_offset = internal::wrap_on_overflow::Add(
                array_byte_offset, internal::wrap_on_overflow::Multiply(
                                       byte_stride, slice_info.offset));
          } else {
            new_index_array_data.byte_strides[slice_info.new_input_dim] =
                byte_stride;
            if (byte_stride != 0) has_non_zero_byte_strides = true;
          }
        }
        Index output_stride = original_map.stride();
        Index output_offset = original_map.offset();
        if (has_non_zero_byte_strides) {
          // The array still varies over retained dimensions: keep it,
          // shifted by the accumulated offset.
          new_index_array_data.element_pointer = AddByteOffset(
              original_index_array_data.element_pointer, array_byte_offset);
        } else {
          // The array has degenerated to a single element: fold that value
          // into a constant map (validating it against index_range).
          TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
              original_index_array_data.element_pointer
                  .byte_strided_pointer()[array_byte_offset],
              new_index_array_data.index_range, &output_offset,
              &output_stride));
          new_map.SetConstant();
        }
        new_map.stride() = output_stride;
        new_map.offset() = output_offset;
        break;
      }
    }
  }
  new_transform->input_rank = new_input_rank;
  new_transform->output_rank = output_rank;
  NormalizeImplicitBounds(*new_transform);
  internal_index_space::DebugCheckInvariants(new_transform);
  return absl::OkStatus();
}
}
// Implements the single-index slice operation: fixes each dimension in
// `*dimensions` at the corresponding index in `indices`, removing those
// dimensions from the transform.
//
// \param transform The transform to slice.
// \param dimensions[in,out] Selected dimensions; cleared on success (the
//     sliced dimensions no longer exist).
// \param indices One index per selected dimension, or a single scalar
//     applied to all of them.
// \param domain_only If `true`, discard the output index maps.
// \returns The sliced transform, or an error from validation/overflow.
Result<IndexTransform<>> ApplySingleIndexSlice(IndexTransform<> transform,
                                               DimensionIndexBuffer* dimensions,
                                               IndexVectorOrScalarView indices,
                                               bool domain_only) {
  TransformRep* rep = TransformAccess::rep(transform);
  auto slicing_info = GetSingletonSlicingInfo(rep, dimensions, indices);
  if (!slicing_info) return slicing_info.status();
  auto new_rep = NewOrMutableRep(rep, slicing_info->new_input_rank,
                                 rep->output_rank, domain_only);
  TENSORSTORE_RETURN_IF_ERROR(
      PerformSingleIndexSlice(rep, new_rep.get(), *slicing_info, domain_only));
  // Move the pointer into the result to avoid a refcount increment/decrement
  // pair, consistent with the other dim-expression ops (e.g. ApplyDiagonal).
  return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(SingleIndexSliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, 2)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, ImplicitLowerBound) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.implicit_lower_bounds({1, 1, 0})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.implicit_lower_bounds({1})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, -7)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{-7, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({-7, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayRetained) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayEliminated) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_constant(3, 4 + 1 * 6)
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniform) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniformLabeled) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, EmptyDomain) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({0, 3})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 2, 7, 0)
.output_index_array(1, 4, 3,
MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).IndexSlice({3}),
{},
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 2, 7, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
// Supplying two indices for a single selected dimension must fail.
TEST(ErrorHandlingTest, DimensionSelectionRankMismatch) {
  TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
                         AllDims().IndexSlice(span<const Index>({1, 2})),
                         absl::StatusCode::kInvalidArgument,
                         "Number of dimensions .* does not match number of "
                         "indices .*");
}
// Slicing at an index past the explicit upper bound must fail.
TEST(ErrorHandlingTest, OutOfBounds) {
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({-10})
                             .input_shape({15})
                             .Finalize()
                             .value(),
                         AllDims().IndexSlice({5}),
                         absl::StatusCode::kOutOfRange,
                         "Slice mismatch: .* is outside valid domain .*");
}
// Slicing exactly at -kInfIndex is never valid, even when the lower bound
// is -kInfIndex (infinite bounds are exclusive of the sentinel itself).
TEST(ErrorHandlingTest, OutOfBoundsInfinity) {
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({-kInfIndex})
                             .input_shape({15})
                             .Finalize()
                             .value(),
                         AllDims().IndexSlice({-kInfIndex}),
                         absl::StatusCode::kOutOfRange,
                         "Slice mismatch: .* is outside valid domain .*");
}
// Folding the slice index into a single_input_dimension map whose offset is
// already INT64_MAX must be detected as overflow (domain-only application
// still succeeds, hence TestDimExpressionErrorTransformOnly).
TEST(ErrorHandlingTest, SingleInputDimensionMapIntegerOverflow) {
  TestDimExpressionErrorTransformOnly(
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({10})
          .output_single_input_dimension(0, std::numeric_limits<Index>::max(),
                                         1, 0)
          .Finalize()
          .value(),
      AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
      "Integer overflow computing offset for output dimension.*",
      IndexDomainBuilder<0>().Finalize().value());
}
// Same overflow check when the value comes from an index array that
// degenerates to a constant after slicing.
TEST(ErrorHandlingTest, IndexArrayMapIntegerOverflow) {
  TestDimExpressionErrorTransformOnly(
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({3})
          .output_index_array(0, std::numeric_limits<Index>::max(), 1,
                              MakeArray<Index>({0, 1, 2}))
          .Finalize()
          .value(),
      AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
      "Integer overflow computing offset for output dimension.*",
      IndexDomainBuilder<0>().Finalize().value());
}
// A degenerate index-array value outside its declared index_range must fail.
TEST(ErrorHandlingTest, IndexArrayMapOutOfBounds) {
  TestDimExpressionErrorTransformOnly(
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({3})
          .output_index_array(0, 0, 1, MakeArray<Index>({0, 1, 2}),
                              IndexInterval::Closed(-5, -3))
          .Finalize()
          .value(),
      AllDims().IndexSlice({1}), absl::StatusCode::kOutOfRange,
      "Index .* is outside valid range .*",
      IndexDomainBuilder<0>().Finalize().value());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/single_index_slice_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/single_index_slice_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c6f622d4-32e5-476d-a55e-af17aa112a86 | cpp | google/tensorstore | index_array_slice_op | tensorstore/index_space/internal/index_array_slice_op.cc | tensorstore/index_space/index_array_slice_op_test.cc | #include "tensorstore/index_space/internal/index_array_slice_op.h"
#include <algorithm>
#include <numeric>
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Broadcasts `source` against `*result` in place, following NumPy rules:
// equal sizes are compatible, and a size of 1 broadcasts to the other size.
//
// Returns `true` on success (with `*result` updated to the broadcast size),
// `false` if the sizes are incompatible (`*result` unchanged).
bool BroadcastSizes(Index source, Index* result) {
  Index& target = *result;
  if (target == source) return true;
  if (target == 1) {
    target = source;
    return true;
  }
  return source == 1;
}
// Broadcasts `source_shape` into `result_shape` element-wise in place.
//
// Returns `false` if the ranks differ or any pair of sizes is incompatible;
// in the latter case `result_shape` may be partially updated.
bool BroadcastShapes(span<const Index> source_shape, span<Index> result_shape) {
  const DimensionIndex rank = source_shape.size();
  if (rank != result_shape.size()) return false;
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    if (!BroadcastSizes(source_shape[dim], &result_shape[dim])) return false;
  }
  return true;
}
// Builds the transform for joint ("vectorized"/NumPy-style) index-array
// indexing, shared by `MakeTransformFromIndexArrays` and
// `MakeTransformFromIndexVectorArray`.  The `num_new_dims` joint dimensions
// come first in the new transform, followed by the retained dimensions in
// their original order.
//
// \param num_new_dims Number of new input dimensions contributed jointly by
//     all index arrays.
// \param orig_transform Existing transform whose input space is indexed.
// \param dimensions[in,out] Selected dimensions; on return, contains the new
//     dimension indices `0, ..., num_new_dims - 1`.
// \param get_new_dimension_bounds Callback: new dim -> IndexInterval.
// \param get_index_array_base_pointer Callback: indexed dim -> element ptr.
// \param get_index_array_byte_stride Callback: (indexed dim, new dim) ->
//     byte stride of that index array over the new dimension.
template <typename GetNewDimensionShapeFn, typename GetIndexArrayBasePointerFn,
          typename GetIndexArrayByteStrideFn>
Result<TransformRep::Ptr<>> MakeTransformFromJointIndexArrays(
    DimensionIndex num_new_dims, TransformRep* orig_transform,
    DimensionIndexBuffer* dimensions,
    GetNewDimensionShapeFn get_new_dimension_bounds,
    GetIndexArrayBasePointerFn get_index_array_base_pointer,
    GetIndexArrayByteStrideFn get_index_array_byte_stride) {
  const DimensionIndex num_indexed_dims = dimensions->size();
  // Output rank of the new transform equals the input rank of the original.
  const DimensionIndex output_rank = orig_transform->input_rank;
  const DimensionIndex input_rank =
      output_rank - dimensions->size() + num_new_dims;
  TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
  auto result = TransformRep::Allocate(input_rank, output_rank);
  result->input_rank = input_rank;
  result->output_rank = output_rank;
  result->implicit_lower_bounds = false;
  result->implicit_upper_bounds = false;
  span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
  const DimensionIndex num_preserved_dims = output_rank - num_indexed_dims;
  // Temporarily mark every map as single_input_dimension; indexed dimensions
  // are overwritten with array maps below, and the sentinel value 0 lets the
  // final loop identify the still-unassigned (preserved) maps.
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    maps[output_dim].SetSingleInputDimension(0);
  }
  const auto input_domain = result->input_domain(input_rank);
  // The joint dimensions occupy input dims [0, num_new_dims).
  for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
    input_domain[new_dim] = get_new_dimension_bounds(new_dim);
  }
  // Install an index-array map for each selected dimension; the arrays vary
  // only over the joint dimensions (zero stride over preserved dimensions).
  for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
       ++indexed_dim) {
    const DimensionIndex output_dim = (*dimensions)[indexed_dim];
    auto& map = maps[output_dim];
    map.offset() = 0;
    map.stride() = 1;
    auto& index_array_data = map.SetArrayIndexing(input_rank);
    std::fill_n(index_array_data.byte_strides + num_new_dims,
                num_preserved_dims, 0);
    for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
      index_array_data.byte_strides[new_dim] =
          get_index_array_byte_stride(indexed_dim, new_dim);
    }
    index_array_data.element_pointer =
        get_index_array_base_pointer(indexed_dim);
  }
  // Preserved dimensions become identity maps from consecutive input
  // dimensions following the joint dimensions, copying their domains.
  for (DimensionIndex output_dim = 0, input_dim = num_new_dims;
       output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    map.SetSingleInputDimension(input_dim);
    map.offset() = 0;
    map.stride() = 1;
    result->input_dimension(input_dim) =
        orig_transform->input_dimension(output_dim);
    ++input_dim;
  }
  // An explicitly empty domain means index arrays are never read; replace
  // them with constants so invariants hold.
  if (IsDomainExplicitlyEmpty(result.get())) {
    ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
  }
  // The new dimension selection is the joint dimensions 0..num_new_dims-1.
  dimensions->resize(num_new_dims);
  std::iota(dimensions->begin(), dimensions->end(),
            static_cast<DimensionIndex>(0));
  internal_index_space::DebugCheckInvariants(result.get());
  return result;
}
// Builds the transform for joint indexing with one index array per selected
// dimension.  The arrays must all have the same rank and be broadcastable to
// a common shape, which becomes the shape of the new joint dimensions.
//
// \returns The new transform, or `absl::InvalidArgumentError` if the number
//     of arrays does not match the selection, no arrays are given, or the
//     shapes cannot be broadcast.
Result<TransformRep::Ptr<>> MakeTransformFromIndexArrays(
    TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
    span<const SharedArrayView<const Index>> index_arrays) {
  const DimensionIndex num_indexed_dims = dimensions->size();
  if (index_arrays.size() != num_indexed_dims) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of selected dimensions (", num_indexed_dims,
        ") does not equal number of index arrays (", index_arrays.size(), ")"));
  }
  if (index_arrays.empty()) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("At least one index array must be specified"));
  }
  // Broadcast all array shapes together, starting from all-ones.  The rank
  // of the first array defines the number of new joint dimensions.
  Index shape[kMaxRank];
  const DimensionIndex num_new_dims = index_arrays[0].rank();
  std::fill_n(&shape[0], num_new_dims, Index(1));
  bool error = false;
  for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
    if (!BroadcastShapes(index_arrays[i].shape(),
                         span<Index>(&shape[0], num_new_dims))) {
      error = true;
    }
  }
  if (error) {
    // Report all shapes in the error message, not just the offending pair.
    std::string shape_msg;
    for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
      tensorstore::StrAppend(&shape_msg, (shape_msg.empty() ? "" : ", "),
                             index_arrays[i].shape());
    }
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Index arrays with shapes ", shape_msg,
                            " cannot be broadcast to a common shape"));
  }
  // Joint dimensions are zero-origin with the broadcast shape.
  const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
    return IndexInterval::UncheckedSized(0, shape[new_dim]);
  };
  const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
    return index_arrays[indexed_dim].pointer();
  };
  // Broadcasting a size-1 dimension is realized with a zero byte stride.
  const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
                                               DimensionIndex new_dim) {
    return index_arrays[indexed_dim].shape()[new_dim] == 1
               ? 0
               : index_arrays[indexed_dim].byte_strides()[new_dim];
  };
  return MakeTransformFromJointIndexArrays(
      num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
      get_index_array_base_pointer, get_index_array_byte_stride);
}
// Builds the transform for outer ("orthogonal") index-array indexing: each
// selected dimension is replaced, in place, by the full set of dimensions of
// its own index array (rather than all arrays sharing joint dimensions).
//
// \param dimensions[in,out] Selected dimensions; on return, contains every
//     new input dimension contributed by the index arrays, grouped per array
//     in the original selection order.
// \returns The new transform, or `absl::InvalidArgumentError` if the number
//     of arrays does not match the selection or the rank limit is exceeded.
Result<TransformRep::Ptr<>> MakeTransformFromOuterIndexArrays(
    TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
    span<const SharedArrayView<const Index>> index_arrays) {
  const DimensionIndex num_indexed_dims = dimensions->size();
  if (index_arrays.size() != num_indexed_dims) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of selected dimensions (", num_indexed_dims,
        ") does not equal number of index arrays (", index_arrays.size(), ")"));
  }
  const DimensionIndex output_rank = orig_transform->input_rank;
  // Each selected dimension is removed and replaced by the rank of its
  // index array.
  DimensionIndex input_rank = output_rank - num_indexed_dims;
  for (const auto& index_array : index_arrays) {
    input_rank += index_array.rank();
  }
  TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
  auto result = TransformRep::Allocate(input_rank, output_rank);
  result->input_rank = input_rank;
  result->output_rank = output_rank;
  result->implicit_lower_bounds = false;
  result->implicit_upper_bounds = false;
  // First new input dimension assigned to each index array (filled below).
  DimensionIndex index_array_start_dim[kMaxRank];
  // Selection order sorted by output dimension, so the single pass over
  // output dimensions below can consume the selection in order.
  DimensionIndex index_array_order[kMaxRank];
  std::iota(&index_array_order[0], &index_array_order[num_indexed_dims],
            static_cast<DimensionIndex>(0));
  std::sort(&index_array_order[0], &index_array_order[num_indexed_dims],
            [&](DimensionIndex a, DimensionIndex b) {
              return (*dimensions)[a] < (*dimensions)[b];
            });
  span<Index> input_origin = result->input_origin().first(input_rank);
  span<Index> input_shape = result->input_shape().first(input_rank);
  span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
  // Walk output dimensions, interleaving index-array dimensions (for
  // selected dims) with identity-mapped copies of the preserved dims.
  for (DimensionIndex output_dim = 0, reordered_indexed_dim = 0, input_dim = 0;
       output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    map.stride() = 1;
    map.offset() = 0;
    if (reordered_indexed_dim < num_indexed_dims) {
      const DimensionIndex indexed_dim =
          index_array_order[reordered_indexed_dim];
      if ((*dimensions)[indexed_dim] == output_dim) {
        // This output dimension is indexed: splice in the array's domain as
        // new input dimensions starting at `input_dim`.
        index_array_start_dim[indexed_dim] = input_dim;
        const auto& array = index_arrays[indexed_dim];
        MutableBoxView<>(input_origin.subspan(input_dim, array.rank()),
                         input_shape.subspan(input_dim, array.rank()))
            .DeepAssign(array.domain());
        const DimensionIndex end_input_dim = input_dim + array.rank();
        if (array.num_elements() == 1) {
          // A single-element array is just a constant map.
          map.SetConstant();
          map.offset() = *array.data();
          map.stride() = 0;
        } else {
          // The array varies only over its own dimensions; all other
          // byte strides are zero.
          auto& index_array_data = map.SetArrayIndexing(input_rank);
          index_array_data.element_pointer = array.element_pointer();
          std::fill_n(index_array_data.byte_strides, input_dim, 0);
          std::copy(array.byte_strides().begin(), array.byte_strides().end(),
                    index_array_data.byte_strides + input_dim);
          std::fill(index_array_data.byte_strides + end_input_dim,
                    index_array_data.byte_strides + input_rank, 0);
        }
        input_dim = end_input_dim;
        ++reordered_indexed_dim;
        continue;
      }
    }
    // Preserved dimension: copy its domain and map it through identity.
    result->input_dimension(input_dim) =
        orig_transform->input_dimension(output_dim);
    map.SetSingleInputDimension(input_dim);
    ++input_dim;
  }
  // An explicitly empty domain means index arrays are never read; replace
  // them with constants so invariants hold.
  if (IsDomainExplicitlyEmpty(result.get())) {
    ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
  }
  // Rebuild the selection: all new dimensions, grouped per index array in
  // the original selection order (not the sorted order).
  dimensions->clear();
  dimensions->reserve(input_rank - output_rank);
  for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
       ++indexed_dim) {
    const DimensionIndex start_input_dim = index_array_start_dim[indexed_dim];
    for (DimensionIndex
             input_dim = start_input_dim,
             end_input_dim = start_input_dim + index_arrays[indexed_dim].rank();
         input_dim != end_input_dim; ++input_dim) {
      dimensions->push_back(input_dim);
    }
  }
  internal_index_space::DebugCheckInvariants(result.get());
  return result;
}
// Computes the transform implied by jointly indexing the selected dimensions
// with a single "index vector" array: dimension `vector_dimension` (which may
// be negative, counting from the end) of `index_vector_array` ranges over the
// selected dimensions, while its remaining dimensions become the new input
// dimensions.
Result<TransformRep::Ptr<>> MakeTransformFromIndexVectorArray(
    TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
    DimensionIndex vector_dimension,
    const SharedArrayView<const Index>& index_vector_array) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      vector_dimension,
      NormalizeDimensionIndex(vector_dimension, index_vector_array.rank()));
  const DimensionIndex num_indexed_dims = dimensions->size();
  if (index_vector_array.shape()[vector_dimension] != num_indexed_dims) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Number of selected dimensions (", num_indexed_dims,
                            ") does not equal index vector length (",
                            index_vector_array.shape()[vector_dimension], ")"));
  }
  const DimensionIndex num_new_dims = index_vector_array.rank() - 1;
  // Maps a new input dimension to the corresponding dimension of
  // `index_vector_array`, skipping over `vector_dimension`.
  const auto get_index_vector_array_dim = [&](DimensionIndex new_dim) {
    return new_dim >= vector_dimension ? new_dim + 1 : new_dim;
  };
  const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
    return index_vector_array.domain()[get_index_vector_array_dim(new_dim)];
  };
  // The index array for each selected dimension is the slice of
  // `index_vector_array` at position `indexed_dim` along `vector_dimension`.
  // The aliasing shared_ptr constructor keeps the full array alive while
  // pointing at the slice's first element.
  const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
    return std::shared_ptr<const Index>(
        index_vector_array.pointer(),
        index_vector_array.byte_strided_pointer() +
            index_vector_array.byte_strides()[vector_dimension] * indexed_dim);
  };
  const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
                                               DimensionIndex new_dim) {
    const DimensionIndex index_vector_array_dim =
        get_index_vector_array_dim(new_dim);
    // Size-1 dimensions are broadcast (stride 0).
    return index_vector_array.shape()[index_vector_array_dim] == 1
               ? 0
               : index_vector_array.byte_strides()[index_vector_array_dim];
  };
  return MakeTransformFromJointIndexArrays(
      num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
      get_index_array_base_pointer, get_index_array_byte_stride);
}
}
// Applies an index-array slicing operation to `transform`.
//
// Builds the slicing transform (joint or outer style, per `outer_indexing`)
// from `index_arrays` over the dimensions in `*dimensions`, then composes it
// with `transform`.
Result<IndexTransform<>> ApplyIndexArraySlice(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    span<const SharedArrayView<const Index>> index_arrays, bool outer_indexing,
    bool domain_only) {
  TransformRep* rep = TransformAccess::rep(transform);
  // Construct the transform representing the slicing operation itself.
  Result<TransformRep::Ptr<>> slice_transform =
      outer_indexing
          ? MakeTransformFromOuterIndexArrays(rep, dimensions, index_arrays)
          : MakeTransformFromIndexArrays(rep, dimensions, index_arrays);
  if (!slice_transform.ok()) return slice_transform.status();
  // Compose the slicing transform with the original transform.
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto composed_rep,
      ComposeTransforms(rep, false, slice_transform->get(), true, domain_only));
  return TransformAccess::Make<IndexTransform<>>(std::move(composed_rep));
}
// Applies an index-vector-array slicing operation to `transform`.
//
// Builds the joint slicing transform from `index_vector_array` (with
// `vector_dimension` ranging over the selected dimensions) and composes it
// with `transform`.
Result<IndexTransform<>> ApplyIndexVectorArraySlice(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    DimensionIndex vector_dimension,
    const SharedArrayView<const Index>& index_vector_array, bool domain_only) {
  TransformRep* rep = TransformAccess::rep(transform);
  // Construct the transform representing the slicing operation itself.
  Result<TransformRep::Ptr<>> slice_transform = MakeTransformFromIndexVectorArray(
      rep, dimensions, vector_dimension, index_vector_array);
  if (!slice_transform.ok()) return slice_transform.status();
  // Compose the slicing transform with the original transform.
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto composed_rep,
      ComposeTransforms(rep, false, slice_transform->get(), true, domain_only));
  return TransformAccess::Make<IndexTransform<>>(std::move(composed_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
// Verifies joint index-array slicing of dims {0, 2} (by index and by label),
// including the expected composed transform and index correspondences.
TEST(IndexArraySliceTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_shape({7, 4, 10})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({0, 0, 2})
          .input_shape({2, 3, 4})
          .input_labels({"", "", "y"})
          .output_index_array(
              0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
              IndexInterval::Sized(0, 7))
          .output_single_input_dimension(1, 2)
          .output_index_array(
              2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
              IndexInterval::Sized(0, 10))
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{1, 3, 7}, {0, 0, 3}},
      {{2, 3, 8}, {0, 1, 3}},
      {{3, 3, 9}, {0, 2, 3}},
      {{6, 3, 2}, {1, 2, 3}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
                                 MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
      {0, 1},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices,
      false);
  TestDimExpression(
      original_transform,
      Dims("x", "z").IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
                                     MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
      {0, 1},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices,
      false);
}
// Verifies slicing dims {0, 2} with a single index vector array (vector
// dimension -1), by dimension index and by label.
TEST(IndexVectorArraySliceTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_shape({7, 4, 10})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({0, 0, 2})
          .input_shape({2, 3, 4})
          .input_labels({"", "", "y"})
          .output_index_array(
              0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
              IndexInterval::Sized(0, 7))
          .output_single_input_dimension(1, 2)
          .output_index_array(
              2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
              IndexInterval::Sized(0, 10))
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 8}, {0, 1, 3}},
      {{6, 3, 2}, {1, 2, 3}},
  };
  TestDimExpression(original_transform,
                    Dims(0, 2).IndexVectorArraySlice(
                        MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
                                          {{4, 0}, {5, 1}, {6, 2}}}),
                        -1),
                    {0, 1},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices,
                    false);
  TestDimExpression(original_transform,
                    Dims("x", "z").IndexVectorArraySlice(
                        MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
                                          {{4, 0}, {5, 1}, {6, 2}}}),
                        -1),
                    {0, 1},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices,
                    false);
}
// Verifies outer index-array slicing of dims {2, 0}: each selected dimension
// is indexed independently, inserting one new input dimension per index array
// dimension.
TEST(IndexArrayOuterIndexArraySliceTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({4, 2, 0})
                                      .input_shape({5, 4, 10})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<4, 3>()
          .input_origin({0, 2, 0, 0})
          .input_shape({2, 4, 2, 2})
          .input_labels({"", "y", "", ""})
          .output_index_array(0, 0, 1, MakeArray<Index>({{{{6}}}, {{{7}}}}),
                              IndexInterval::Sized(4, 5))
          .output_single_input_dimension(1, 1)
          .output_index_array(2, 0, 1, MakeArray<Index>({{{{2, 3}, {4, 5}}}}),
                              IndexInterval::Sized(0, 10))
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{6, 3, 3}, {0, 3, 0, 1}},
      {{7, 3, 4}, {1, 3, 1, 0}},
  };
  TestDimExpression(
      original_transform,
      Dims(2, 0).OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
                                      MakeArray<Index>({6, 7})),
      {2, 3, 0},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices,
      false);
  TestDimExpression(
      original_transform,
      Dims("z", "x").OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
                                          MakeArray<Index>({6, 7})),
      {2, 3, 0},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices,
      false);
}
// Verifies slicing a single dimension with a 1-d index array, composed with a
// strided output map.
TEST(IndexArraySliceTest, OneDOutputOneDArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0).IndexArraySlice(MakeArray<Index>({1, 2})),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      {{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
      false);
}
// Verifies that a zero-element index array yields an empty domain with
// index-array maps replaced by constant maps.
TEST(IndexArraySliceTest, ZeroElementIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0).IndexArraySlice(
          tensorstore::AllocateArray<Index>({5, 0, 3})),
      {0, 1, 2},
      IndexTransformBuilder<4, 2>()
          .input_origin({0, 0, 0, -100})
          .input_shape({5, 0, 3, 200})
          .output_constant(0, 0)
          .output_single_input_dimension(1, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 2>()
          .input_origin({0, 0, 0, -100})
          .input_shape({5, 0, 3, 200})
          .output_constant(0, -2)
          .output_single_input_dimension(1, 10, 11, 3)
          .Finalize()
          .value(),
      {},
      false);
}
// Verifies that a single-element index array is converted to a constant
// output map.
TEST(IndexArraySliceTest, OneElementIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0).IndexArraySlice(MakeArray<Index>({{5}})),
      {0, 1},
      IndexTransformBuilder<3, 2>()
          .input_origin({0, 0, -100})
          .input_shape({1, 1, 200})
          .output_constant(0, 5)
          .output_single_input_dimension(1, 2)
          .Finalize()
          .value(),
      IndexTransformBuilder<3, 2>()
          .input_origin({0, 0, -100})
          .input_shape({1, 1, 200})
          .output_constant(0, -17)
          .output_single_input_dimension(1, 10, 11, 2)
          .Finalize()
          .value(),
      {{{5, 6}, {0, 0, 6}}},
      false);
}
// Same as OneDOutputOneDArray, but additionally labels the new dimension.
TEST(IndexArraySliceTest, OneDOutputOneDArrayLabeled) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .input_labels({"x", "y"})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0)
          .IndexArraySlice(MakeArray<Index>({1, 2}))
          .Label("index"),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .input_labels({"index", "y"})
          .output_index_array(0, 0, 1,
                              MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .input_labels({"index", "y"})
          .output_index_array(0, -2, -3,
                              MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      {{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
      false);
}
// Verifies joint slicing of both dimensions with two 1-d index arrays,
// collapsing the domain to one labeled dimension.
TEST(IndexArraySliceTest, TwoDOutputOneDArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -2})
          .input_shape({20, 15})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, -4, 2, 1)
          .Finalize()
          .value(),
      AllDims()
          .IndexArraySlice(MakeArray<Index>({1, 2}),
                           MakeArray<Index>({3, 4}))
          .Label("index"),
      {0},
      IndexTransformBuilder<1, 2>()
          .input_origin({0})
          .input_shape({2})
          .input_labels({"index"})
          .output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 2>()
          .input_origin({0})
          .input_shape({2})
          .input_labels({"index"})
          .output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      {{{1, 3}, {0}}, {{2, 4}, {1}}},
      false);
}
// Verifies that joint index arrays of shapes {1,2} and {2,1} broadcast to a
// common {2,2} domain.
TEST(IndexArraySliceTest, TwoDOutputOneDArrayBroadcast) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -2})
          .input_shape({20, 15})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, -4, 2, 1)
          .Finalize()
          .value(),
      AllDims().IndexArraySlice(MakeArray<Index>({{1, 2}}),
                                MakeArray<Index>({{3}, {4}})),
      {0, 1},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, 0})
          .input_shape({2, 2})
          .output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, 0, 1, MakeArray<Index>({{3}, {4}}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, 0})
          .input_shape({2, 2})
          .output_index_array(0, -2, -3, MakeArray<Index>({{1, 2}}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, -4, 2, MakeArray<Index>({{3}, {4}}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      {{{1, 3}, {0, 0}}, {{1, 4}, {1, 0}}, {{2, 4}, {1, 1}}},
      false);
}
// Verifies error cases: wrong index-array count, empty selection, and
// non-broadcastable index array shapes.
TEST(IndexArraySliceTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(span<const DimensionIndex>({0}))
          .IndexArraySlice(MakeArray<Index>({1, 2}), MakeArray<Index>({3, 4})),
      absl::StatusCode::kInvalidArgument,
      "Number of selected dimensions \\(1\\) does not equal number of index "
      "arrays \\(2\\)");
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>().Finalize().value(),
      Dims(span<const DimensionIndex>())
          .IndexArraySlice(span<const SharedArrayView<const Index>>()),
      absl::StatusCode::kInvalidArgument,
      "At least one index array must be specified");
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(0, 1).IndexArraySlice(MakeArray<Index>({1, 2}),
                                 MakeArray<Index>({3, 4, 5})),
      absl::StatusCode::kInvalidArgument,
      "Index arrays with shapes \\{2\\}, \\{3\\} cannot be broadcast "
      "to a common shape");
}
// Verifies that a slice whose resulting input rank exceeds kMaxRank (32) is
// rejected.
TEST(IndexArraySliceTest, InvalidRank) {
  auto index_array = tensorstore::AllocateArray<Index>(
      std::vector<Index>(32, 1), tensorstore::c_order, tensorstore::value_init);
  TestDimExpressionError(tensorstore::IdentityTransform(2),
                         Dims(0).IndexArraySlice(index_array),
                         absl::StatusCode::kInvalidArgument,
                         "Rank 33 is outside valid range \\[0, 32\\]");
}
// Verifies index-vector-array slicing of one dimension with vector dimension
// 0.
TEST(IndexVectorArraySliceTest, OneDOutputOneDArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1, 2}}),
                                    0),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .output_index_array(0, 0, 1,
                              MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .output_index_array(0, -2, -3,
                              MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      {{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
      false);
}
// Verifies that a single-element index vector array becomes a constant map.
TEST(IndexVectorArraySliceTest, OneElementIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1}}), 0),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({1, 200})
          .output_constant(0, 1)
          .output_single_input_dimension(1, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({1, 200})
          .output_constant(0, -5)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      {{{1, 5}, {0, 5}}},
      false);
}
// Same as OneDOutputOneDArray, but additionally labels the new dimension.
TEST(IndexVectorArraySliceTest, OneDOutputOneDArrayLabeled) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -100})
          .input_shape({20, 200})
          .input_labels({"x", "y"})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      Dims(0)
          .IndexVectorArraySlice(MakeArray<Index>({{1, 2}}), 0)
          .Label("index"),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .input_labels({"index", "y"})
          .output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 2>()
          .input_origin({0, -100})
          .input_shape({2, 200})
          .input_labels({"index", "y"})
          .output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
                              IndexInterval::Sized(-10, 20))
          .output_single_input_dimension(1, 10, 11, 1)
          .Finalize()
          .value(),
      {{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
      false);
}
// Verifies jointly slicing both dimensions with a single index vector array
// (vector dimension -1), collapsing to one labeled dimension.
TEST(IndexVectorArraySliceTest, TwoDOutputOneDArray) {
  TestDimExpression(
      IndexTransformBuilder<2, 2>()
          .input_origin({-10, -2})
          .input_shape({20, 15})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, -4, 2, 1)
          .Finalize()
          .value(),
      AllDims()
          .IndexVectorArraySlice(
              MakeArray<Index>({{1, 3}, {2, 4}}), -1)
          .Label("index"),
      {0},
      IndexTransformBuilder<1, 2>()
          .input_origin({0})
          .input_shape({2})
          .input_labels({"index"})
          .output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 2>()
          .input_origin({0})
          .input_shape({2})
          .input_labels({"index"})
          .output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
                              IndexInterval::Sized(-10, 20))
          .output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      {{{1, 3}, {0}}, {{2, 4}, {1}}},
      false);
}
// Verifies error cases: index vector length mismatch and out-of-range vector
// dimension.
TEST(IndexVectorArraySliceTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 0),
      absl::StatusCode::kInvalidArgument,
      "Number of selected dimensions \\(1\\) does not equal index vector "
      "length \\(2\\)");
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 1),
      absl::StatusCode::kInvalidArgument,
      "Dimension index 1 is outside valid range \\[-1, 1\\)");
}
// Verifies that a slice whose resulting input rank exceeds kMaxRank (32) is
// rejected.
TEST(IndexVectorArraySliceTest, InvalidRank) {
  TestDimExpressionError(
      tensorstore::IdentityTransform(4),
      Dims(0, 1).IndexVectorArraySlice(
          tensorstore::AllocateArray<Index>({1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 2},
                                            tensorstore::c_order,
                                            tensorstore::default_init),
          -1),
      absl::StatusCode::kInvalidArgument,
      "Rank 33 is outside valid range \\[0, 32\\]");
}
// Verifies outer index-array slicing of dims {0, 2} with a 2-d and a 1-d
// index array, composed with strided output maps.
TEST(OuterIndexArraySliceTest, Integration) {
  TestDimExpression(
      IndexTransformBuilder<3, 3>()
          .input_origin({-10, -100, -2})
          .input_shape({21, 200, 15})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 6, 5, 1)
          .output_single_input_dimension(2, -4, 2, 2)
          .Finalize()
          .value(),
      Dims(0, 2).OuterIndexArraySlice(
          MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
          MakeArray<Index>({1, 2})),
      {0, 1, 3},
      IndexTransformBuilder<4, 3>()
          .input_origin({0, 0, -100, 0})
          .input_shape({2, 3, 200, 2})
          .output_index_array(
              0, 0, 1,
              MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
                                {{{8}}, {{9}}, {{10}}}}),
              IndexInterval::Sized(-10, 21))
          .output_single_input_dimension(1, 2)
          .output_index_array(2, 0, 1,
                              MakeArray<Index>({{{{1, 2}}}}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 3>()
          .input_origin({0, 0, -100, 0})
          .input_shape({2, 3, 200, 2})
          .output_index_array(
              0, -2, -3,
              MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
                                {{{8}}, {{9}}, {{10}}}}),
              IndexInterval::Sized(-10, 21))
          .output_single_input_dimension(1, 6, 5, 2)
          .output_index_array(2, -4, 2,
                              MakeArray<Index>({{{{1, 2}}}}),
                              IndexInterval::Sized(-2, 15))
          .Finalize()
          .value(),
      {{{3, 5, 1}, {0, 0, 5, 0}},
       {{9, 5, 2}, {1, 1, 5, 1}},
       {{8, 5, 2}, {1, 0, 5, 1}},
       {{10, 5, 2}, {1, 2, 5, 1}}},
      false);
}
// Verifies that a single-element outer index array is converted to a
// constant map.
TEST(OuterIndexArraySliceTest, OneElementIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<3, 3>()
          .input_origin({-10, -100, -2})
          .input_shape({21, 200, 15})
          .output_single_input_dimension(0, -2, -3, 0)
          .output_single_input_dimension(1, 6, 5, 1)
          .output_single_input_dimension(2, -4, 2, 2)
          .Finalize()
          .value(),
      Dims(0, 2).OuterIndexArraySlice(
          MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
          MakeArray<Index>({1})),
      {0, 1, 3},
      IndexTransformBuilder<4, 3>()
          .input_origin({0, 0, -100, 0})
          .input_shape({2, 3, 200, 1})
          .output_index_array(
              0, 0, 1,
              MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
                                {{{8}}, {{9}}, {{10}}}}),
              IndexInterval::Sized(-10, 21))
          .output_single_input_dimension(1, 2)
          .output_constant(2, 1)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 3>()
          .input_origin({0, 0, -100, 0})
          .input_shape({2, 3, 200, 1})
          .output_index_array(
              0, -2, -3,
              MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
                                {{{8}}, {{9}}, {{10}}}}),
              IndexInterval::Sized(-10, 21))
          .output_single_input_dimension(1, 6, 5, 2)
          .output_constant(2, -2)
          .Finalize()
          .value(),
      {{{3, 5, 1}, {0, 0, 5, 0}}},
      false);
}
// Verifies the index-array count mismatch error for outer indexing.
TEST(OuterIndexArraySliceTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(span<const DimensionIndex>({0}))
          .OuterIndexArraySlice(MakeArray<Index>({1, 2}),
                                MakeArray<Index>({3, 4})),
      absl::StatusCode::kInvalidArgument,
      "Number of selected dimensions \\(1\\) does not equal number of index "
      "arrays \\(2\\)");
}
// Verifies that an outer slice whose resulting input rank exceeds kMaxRank
// (32) is rejected.
TEST(OuterIndexArraySliceTest, InvalidRank) {
  auto index_array = tensorstore::AllocateArray<Index>(
      std::vector<Index>(17, 1), tensorstore::c_order, tensorstore::value_init);
  TestDimExpressionError(
      tensorstore::IdentityTransform(2),
      Dims(0, 1).OuterIndexArraySlice(index_array, index_array),
      absl::StatusCode::kInvalidArgument,
      "Rank 34 is outside valid range \\[0, 32\\]");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/index_array_slice_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_array_slice_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f8ba3194-ffe1-415c-af89-f97e6987e7a8 | cpp | google/tensorstore | transform_array | tensorstore/index_space/internal/transform_array.cc | tensorstore/index_space/transform_array_test.cc | #include "tensorstore/index_space/internal/transform_array.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
namespace tensorstore {
namespace internal_index_space {
// Computes the materialized representation of `array` transformed by
// `transform`, restricted to the sub-region given by
// `result_origin`/`result_shape` (of length equal to the transform's input
// rank; a null `transform` is treated as identity).  On success, fills in
// `result_byte_strides` and returns the base element pointer, which is
// addressed *without* any origin offset (i.e. as if the origin were zero).
// `constraints` controls iteration order, whether a view may be returned
// instead of a copy, and whether repeated elements are permitted.
Result<SharedElementPointer<const void>> TransformArraySubRegion(
    const SharedArrayView<const void, dynamic_rank, offset_origin>& array,
    TransformRep* transform, const Index* result_origin,
    const Index* result_shape, Index* result_byte_strides,
    TransformArrayConstraints constraints) {
  const DimensionIndex input_rank =
      transform ? transform->input_rank : array.rank();
  // An empty result region needs no data: return a null pointer with zero
  // strides.
  for (DimensionIndex i = 0; i < input_rank; ++i) {
    if (result_shape[i] == 0) {
      std::fill_n(result_byte_strides, input_rank, 0);
      return SharedElementPointer<const void>(std::shared_ptr<const void>(),
                                              array.dtype());
    }
  }
  namespace flags = input_dimension_iteration_flags;
  flags::Bitmask input_dimension_flags[kMaxRank];
  std::fill_n(
      &input_dimension_flags[0], input_rank,
      flags::GetDefaultBitmask(constraints.repeated_elements_constraint()));
  // State [0] iterates over the source array through `transform`; state [1]
  // (initialized later, only when a copy is made) iterates over the newly
  // allocated destination.
  SingleArrayIterationState single_array_states[2];
  TENSORSTORE_RETURN_IF_ERROR(
      internal_index_space::InitializeSingleArrayIterationState(
          array,
          transform,
          result_origin,
          result_shape, &single_array_states[0],
          &input_dimension_flags[0]));
  if (single_array_states[0].num_array_indexed_output_dimensions == 0) {
    // Purely strided transform: the result is just a strided view of the
    // source data.
    if (constraints.allocate_constraint() != must_allocate) {
      // A view is permitted: return it without copying.  The returned
      // pointer shares ownership with `array`.
      std::copy_n(&single_array_states[0].input_byte_strides[0], input_rank,
                  result_byte_strides);
      return SharedElementPointer<void>(
          std::shared_ptr<void>(array.pointer(),
                                single_array_states[0].base_pointer),
          array.element_pointer().dtype());
    }
    // A copy is required: allocate a layout-compatible destination and copy.
    const StridedLayoutView<> source_layout(
        input_rank, result_shape,
        &single_array_states[0].input_byte_strides[0]);
    const StridedLayoutView<> new_layout(input_rank, result_shape,
                                         result_byte_strides);
    auto element_pointer = internal::AllocateArrayLike(
        array.element_pointer().dtype(), source_layout, result_byte_strides,
        constraints.iteration_constraints(), default_init);
    CopyArray(ArrayView<const void>(
                  ElementPointer<void>(single_array_states[0].base_pointer,
                                       array.element_pointer().dtype()),
                  source_layout),
              ArrayView<void>(element_pointer, new_layout));
    return element_pointer;
  }
  // The transform uses index arrays, so a copy is always required below.
  MarkSingletonDimsAsSkippable(span(result_shape, input_rank),
                               &input_dimension_flags[0]);
  SharedElementPointer<void> new_element_pointer;
  if (constraints.order_constraint()) {
    // The caller requested a specific element order: compute the destination
    // strides directly from that order (skippable dims get extent 1 /
    // stride 0).
    Index new_shape[kMaxRank];
    for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
      new_shape[input_dim] = input_dimension_flags[input_dim] == flags::can_skip
                                 ? 1
                                 : result_shape[input_dim];
    }
    ComputeStrides(constraints.order_constraint().order(), array.dtype()->size,
                   span<const Index>(&new_shape[0], input_rank),
                   span(result_byte_strides, input_rank));
    for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
      if (new_shape[input_dim] <= 1) result_byte_strides[input_dim] = 0;
    }
    // Offset the destination base so that indexing with `result_origin`
    // lands at the start of the newly allocated buffer.
    const Index new_origin_offset =
        IndexInnerProduct(input_rank, result_byte_strides, result_origin);
    new_element_pointer = internal::AllocateAndConstructSharedElements(
        ProductOfExtents(span<const Index>(new_shape, input_rank)),
        default_init, array.dtype());
    const absl::Status init_status =
        internal_index_space::InitializeSingleArrayIterationState(
            ArrayView<void, dynamic_rank, offset_origin>(
                AddByteOffset(ElementPointer<void>(new_element_pointer),
                              -new_origin_offset),
                StridedLayoutView<dynamic_rank, offset_origin>(
                    input_rank, result_origin, &new_shape[0],
                    result_byte_strides)),
            nullptr,
            result_origin,
            result_shape, &single_array_states[1],
            &input_dimension_flags[0]);
    assert(init_status.ok());
  }
  // Choose the dimension iteration order; with an order constraint both
  // source and destination states participate, otherwise only the source.
  DimensionIterationOrder base_layout =
      constraints.order_constraint()
          ? ComputeDimensionIterationOrder<2>(
                single_array_states,
                span(input_dimension_flags).first(input_rank),
                {})
          : ComputeDimensionIterationOrder<1>(
                {&single_array_states[0], 1},
                span(input_dimension_flags).first(input_rank),
                {});
  if (!constraints.order_constraint()) {
    // No order constraint: lay out the destination in C order over the
    // dimensions actually iterated (skipped dimensions get stride 0).
    Index new_shape[kMaxRank];
    Index new_byte_strides[kMaxRank];
    for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
      const DimensionIndex input_dim = base_layout.input_dimension_order[i];
      new_shape[i] = result_shape[input_dim];
    }
    std::fill_n(result_byte_strides, input_rank, 0);
    ComputeStrides(
        ContiguousLayoutOrder::c, array.dtype()->size,
        span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim),
        span<Index>(&new_byte_strides[0], base_layout.pure_strided_end_dim));
    for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
      const DimensionIndex input_dim = base_layout.input_dimension_order[i];
      result_byte_strides[input_dim] = new_byte_strides[i];
    }
    new_element_pointer = internal::AllocateAndConstructSharedElements(
        ProductOfExtents(
            span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim)),
        default_init, array.dtype());
    // As above: offset the base so `result_origin` addresses the buffer
    // start.
    const Index new_origin_offset =
        IndexInnerProduct(input_rank, result_byte_strides, result_origin);
    const absl::Status init_status =
        internal_index_space::InitializeSingleArrayIterationState(
            ArrayView<void, dynamic_rank, offset_origin>(
                AddByteOffset(ElementPointer<void>(new_element_pointer),
                              -new_origin_offset),
                StridedLayoutView<dynamic_rank, offset_origin>(
                    input_rank, result_origin, &new_shape[0],
                    result_byte_strides)),
            nullptr,
            result_origin,
            result_shape, &single_array_states[1],
            &input_dimension_flags[0]);
    assert(init_status.ok());
  }
  // Copy elements from the transformed source into the destination.
  SimplifiedDimensionIterationOrder layout = SimplifyDimensionIterationOrder<2>(
      base_layout, span(result_shape, input_rank), single_array_states);
  const std::array<std::ptrdiff_t, 2> element_sizes{array.dtype()->size,
                                                    array.dtype()->size};
  [[maybe_unused]] const bool success = IterateUsingSimplifiedLayout<2>(
      layout, span(result_shape, input_rank),
      {&array.dtype()->copy_assign, nullptr},
      nullptr, single_array_states, element_sizes);
  assert(success);
  return new_element_pointer;
}
// Transforms `array` by `transform`, preserving the input-domain origin: the
// returned element pointer is offset so that it is addressed using the
// `result_origin` and `result_byte_strides` filled in here.
//
// A null `transform` is treated as an identity transform over `array`'s
// domain (matching TransformArraySubRegion / TransformArrayDiscardingOrigin).
//
// \param array Source array.
// \param transform Transform to apply, or nullptr for identity.
// \param result_origin[out] Receives the resulting domain origin.
// \param result_shape[out] Receives the resulting domain shape.
// \param result_byte_strides[out] Receives the resulting byte strides.
// \param constraints Iteration/allocation constraints.
Result<SharedElementPointer<const void>> TransformArrayPreservingOrigin(
    SharedArrayView<const void, dynamic_rank, offset_origin> array,
    TransformRep* transform, Index* result_origin, Index* result_shape,
    Index* result_byte_strides, TransformArrayConstraints constraints) {
  const DimensionIndex input_rank =
      transform ? transform->input_rank : array.rank();
  TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
      array.domain(),
      transform,
      MutableBoxView<>(input_rank, result_origin, result_shape)));
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto element_pointer,
      TransformArraySubRegion(array, transform, result_origin, result_shape,
                              result_byte_strides, constraints));
  // Offset the base pointer so that indexing with `result_origin` addresses
  // the first element.  Use the null-safe `input_rank` computed above rather
  // than dereferencing `transform`, which may be null (identity transform),
  // consistent with the other Transform* functions in this file.
  return AddByteOffset(std::move(element_pointer),
                       -IndexInnerProduct(input_rank, result_byte_strides,
                                          result_origin));
}
// Transforms `array` by `transform`, discarding the input-domain origin: the
// returned element pointer is addressed as if the resulting domain had a
// zero origin.  A null `transform` is treated as an identity transform.
Result<SharedElementPointer<const void>> TransformArrayDiscardingOrigin(
    SharedArrayView<const void, dynamic_rank, offset_origin> array,
    TransformRep* transform, Index* result_shape, Index* result_byte_strides,
    TransformArrayConstraints constraints) {
  const DimensionIndex input_rank =
      transform ? transform->input_rank : array.rank();
  // The origin is computed into a scratch buffer and never returned to the
  // caller.
  Index origin_buffer[kMaxRank];
  TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
      array.domain(),
      transform,
      MutableBoxView<>(input_rank, origin_buffer, result_shape)));
  return TransformArraySubRegion(array, transform, origin_buffer, result_shape,
                                 result_byte_strides, constraints);
}
}
} | #include "tensorstore/index_space/internal/transform_array.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
// Verifies that an identity transform returns an equal array.
TEST(TransformArrayTest, OneDimensionalIdentity) {
  auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
  auto new_array =
      tensorstore::TransformArray(original_array, IdentityTransform<1>())
          .value();
  EXPECT_EQ(original_array, new_array);
}
// Verifies that an identity transform preserves a non-zero origin.
TEST(TransformArrayTest, OneDimensionalIdentityWithOrigin) {
  auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
  auto new_array =
      tensorstore::TransformArray(original_array, IdentityTransform<1>())
          .value();
  EXPECT_EQ(original_array, new_array);
}
// Verifies that an unstrided slice returns a view sharing the original data.
TEST(TransformArrayTest, OneDimensionalSliceUnstrided) {
  auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
  auto new_array = tensorstore::TransformArray(
                       original_array, IndexTransformBuilder<1, 1>()
                                           .input_origin({1})
                                           .input_shape({2})
                                           .output_single_input_dimension(0, 0)
                                           .Finalize()
                                           .value())
                       .value();
  EXPECT_EQ(&original_array(1), &new_array(1));
  EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
// Verifies an unstrided slice with an output offset into an offset array,
// still sharing the original data.
TEST(TransformArrayTest, OneDimensionalSliceUnstridedWithOrigin) {
  auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
  auto new_array =
      tensorstore::TransformArray(original_array,
                                  IndexTransformBuilder<1, 1>()
                                      .input_origin({1})
                                      .input_shape({2})
                                      .output_single_input_dimension(0, 5, 1, 0)
                                      .Finalize()
                                      .value())
          .value();
  EXPECT_EQ(&original_array(6), &new_array(1));
  EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
// Verifies a strided (stride 2) slice returning a view of the original data.
TEST(TransformArrayTest, OneDimensionalSliceStrided) {
  auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
  auto new_array =
      tensorstore::TransformArray(
          original_array, IndexTransformBuilder<1, 1>()
                              .input_origin({1})
                              .input_shape({2})
                              .output_single_input_dimension(0, -1, 2, 0)
                              .Finalize()
                              .value())
          .value();
  EXPECT_EQ(&original_array(1), &new_array(1));
  EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
// Verifies a strided slice with an output offset into an offset array.
TEST(TransformArrayTest, OneDimensionalSliceStridedWithOrigin) {
  auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
  auto new_array =
      tensorstore::TransformArray(original_array,
                                  IndexTransformBuilder<1, 1>()
                                      .input_origin({1})
                                      .input_shape({2})
                                      .output_single_input_dimension(0, 4, 2, 0)
                                      .Finalize()
                                      .value())
          .value();
  EXPECT_EQ(&original_array(6), &new_array(1));
  EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 1, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray1025) {
constexpr Index kSize = 1025;
auto index_array = tensorstore::AllocateArray<Index>({kSize});
for (Index i = 0; i < kSize; ++i) index_array(i) = i;
auto new_array =
tensorstore::TransformArray(index_array,
IndexTransformBuilder<1, 1>()
.input_shape({kSize})
.output_index_array(0, 0, 1, index_array)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array, new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayRetainZeroStride) {
auto index_array = tensorstore::MakeArray<Index>({0, 1, 2, 3, 4});
tensorstore::SharedArray<Index, 2> index_array2;
index_array2.element_pointer() = index_array.element_pointer();
index_array2.shape()[0] = 5;
index_array2.shape()[1] = 2;
index_array2.byte_strides()[0] = index_array.byte_strides()[0];
index_array2.byte_strides()[1] = 0;
EXPECT_EQ(index_array2,
MakeArray<Index>({{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}));
auto new_array =
tensorstore::TransformArray(index_array2,
IndexTransformBuilder<2, 2>()
.input_shape({5, 2})
.output_index_array(0, 0, 1, index_array2)
.output_single_input_dimension(1, 1)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array2, new_array);
EXPECT_EQ(index_array2.layout(), new_array.layout());
}
// An index-array output map whose offset is `Index::min()` makes the
// propagated output range overflow; `TransformArray` must fail with
// `kInvalidArgument` rather than wrapping around.
TEST(TransformArrayTest, IndexArrayBoundsOverflow) {
  auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
  EXPECT_THAT(tensorstore::TransformArray(
                  original_array,
                  IndexTransformBuilder<1, 1>()
                      .input_origin({2})
                      .input_shape({4})
                      .output_index_array(0, std::numeric_limits<Index>::min(),
                                          1, MakeArray<Index>({0, 2, 2, 1}))
                      .Finalize()
                      .value())
                  .status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            ".*Integer overflow propagating range.*"));
}
TEST(TransformArrayTest, OneDArrayOneDIndexArrayWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 6, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArray) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, -1, 1, 0)
.output_index_array(1, 1, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 4, 1, 0)
.output_index_array(1, 7, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStrided) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
}
TEST(TransformArrayTest, ArrayIndexOutOfBounds) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 2, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 2 is outside valid range \\[0, 2\\).*"));
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, -1, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index -1 is outside valid range \\[0, 2\\).*"));
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStridedWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2));
}
TEST(TransformArrayTest, IncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::include_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 2, sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, SkipSingleton) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 1})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3},
{{{6}, {8}, {8}, {6}}, {{2}, {4}, {4}, {2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, SkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, OrderConstraint) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value(),
tensorstore::c_order)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintIncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::include_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 8, sizeof(int) * 2, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintSkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::skip_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int), 0));
}
TEST(TransformArrayTest, MultipleArrayIndexedDimensions) {
auto original_array = tensorstore::MakeArray<int>({{1, 2}, {5, 6}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1}}))
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeArray<int>({{1, 5}, {2, 6}}), new_array);
}
// A transform whose domain is explicitly empty (zero-size dimension) must
// produce an empty result array without evaluating the index array (whose
// bounds would otherwise matter).
TEST(TransformArrayTest, EmptyDomain) {
  auto original_array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      (IndexTransformBuilder<2, 2>()
           .input_shape({0, 3})
           .implicit_upper_bounds({1, 0})
           .output_single_input_dimension(0, 0)
           .output_index_array(0, 0, 1, MakeArray<Index>({{0, 1, 2}}))
           .Finalize()));
  EXPECT_THAT(tensorstore::TransformArray(original_array, transform),
              ::testing::Optional(tensorstore::AllocateArray<int>({0, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transform_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
19ac5ec1-6718-4849-b3c2-d2aff780b06c | cpp | google/tensorstore | translate_op | tensorstore/index_space/internal/translate_op.cc | tensorstore/index_space/translate_op_test.cc | #include "tensorstore/index_space/internal/translate_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
/// Rewrites the output index maps of `transform` in place to compensate for
/// shifting each input dimension `d` by `input_offsets[d]`, so that the
/// composite mapping from (shifted) input indices to output indices is
/// unchanged.
///
/// \param transform Non-null transform representation to mutate.
/// \param input_offsets Array of length `transform->input_rank` giving the
///     per-input-dimension shift amounts.
/// \returns `absl::OkStatus()` on success, or `kInvalidArgument` if the
///     adjusted offset/stride overflows.
absl::Status TranslateOutputOffsetsUsingInputOffsets(
    TransformRep* transform, const Index* input_offsets) {
  const DimensionIndex output_rank = transform->output_rank;
  const DimensionIndex input_rank = transform->input_rank;
  span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::single_input_dimension: {
        // new_offset = offset - stride * input_offset, so that
        // offset' + stride * (x + input_offset) == offset + stride * x.
        const DimensionIndex input_dim = map.input_dimension();
        const Index offset_change = input_offsets[input_dim];
        Index new_offset;
        if (internal::MulOverflow(offset_change, map.stride(), &new_offset) ||
            internal::SubOverflow(map.offset(), new_offset, &map.offset())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing output offset for dimension ",
              output_dim, "."));
        }
        break;
      }
      case OutputIndexMethod::array: {
        // Shift the index array's base pointer backwards by the byte offset
        // corresponding to the input shift, leaving byte strides unchanged.
        auto& index_array_data = map.index_array_data();
        index_array_data.element_pointer = AddByteOffset(
            std::move(index_array_data.element_pointer),
            -IndexInnerProduct(input_rank, index_array_data.byte_strides,
                               input_offsets));
        break;
      }
      case OutputIndexMethod::constant:
        // Constant maps do not depend on input indices; nothing to adjust.
        break;
    }
  }
  return absl::OkStatus();
}
}
/// Implements the TranslateTo/TranslateBy/TranslateBackwardBy dimension
/// expressions: shifts the domains of the dimensions listed in `dimensions`
/// and adjusts the output index maps so the overall index mapping is
/// preserved.
///
/// \param transform Transform to modify (consumed).
/// \param dimensions Selected input dimensions, one per entry of `offsets`.
/// \param offsets Per-dimension shift amounts (or new origins for
///     `kTranslateTo`); an entry equal to `kImplicit` leaves that dimension
///     unchanged.
/// \param kind Chooses the interpretation of `offsets`.
/// \param domain_only If true, output index maps are stripped rather than
///     adjusted.
/// \returns The translated transform, or an error on size mismatch, interval
///     overflow, or output-offset overflow.
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        IndexVectorOrScalarView offsets,
                                        TranslateOpKind kind,
                                        bool domain_only) {
  const DimensionIndex num_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  const auto input_domain = rep->input_domain(input_rank);
  // Effective shift applied to each input dimension (0 for unselected or
  // kImplicit dimensions); consumed below to fix up the output maps.
  Index input_offsets[kMaxRank];
  std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    const DimensionIndex input_dim = (*dimensions)[i];
    Index offset = offsets[i];
    if (offset == kImplicit) continue;
    const IndexInterval old_interval = input_domain[input_dim];
    IndexInterval new_interval;
    switch (kind) {
      case TranslateOpKind::kTranslateTo: {
        // `offset` is the new origin; convert it into a relative shift.
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftIntervalTo(old_interval, offset));
        offset = new_interval.inclusive_min() - old_interval.inclusive_min();
        break;
      }
      case TranslateOpKind::kTranslateBackwardBy: {
        // Backward translation is forward translation by the negated amount.
        offset = -offset;
      }
        [[fallthrough]];
      case TranslateOpKind::kTranslateBy: {
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftInterval(old_interval, offset));
        break;
      }
    }
    input_domain[input_dim] = new_interval;
    input_offsets[input_dim] = offset;
  }
  // Compensate output maps so the composite input->output mapping is
  // unchanged by the domain shift.
  TENSORSTORE_RETURN_IF_ERROR(
      TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(TranslateByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({11, 2, 23})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {12, 3, 23}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateBackwardByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({-9, 2, -17})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {-8, 3, -17}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateToTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({10, 2, 20})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -9, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -17, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {11, 3, 20}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims(0, 2).TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateByTest, OneDimensionalConstant) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimension) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2 - 3 * 5, 3, 0)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{4}, {4}}});
}
TEST(TranslateByTest, OneDimensionalIndexArray) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
{{{1}, {6}}});
}
TEST(TranslateByTest, AllDimsUniform) {
TestDimExpression(
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 5, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 10})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4, 7, 1)
.output_single_input_dimension(4, 5, 8, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0, 1, 2},
IndexTransformBuilder<3, 3>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, -5, 1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, 1 - 4 * 5, 4, 0)
.output_single_input_dimension(1, 2 - 5 * 5, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4 - 7 * 5, 7, 1)
.output_single_input_dimension(4, 5 - 8 * 5, 8, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}});
}
TEST(TranslateByTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateBy(span<const Index>({1, 2})),
absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) does not match number of "
"indices \\(2\\)");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-kInfIndex),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMaxFiniteIndex - 1})
.input_shape({2})
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(
0, std::numeric_limits<Index>::min(), 1, 0)
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
"Integer overflow computing output offset .*");
}
TEST(TranslateByTest, DimSubsetUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy(5),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 5, 2, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5, 6 + 5}}});
}
TEST(TranslateByTest, DimSubsetNonUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy({5, 6}),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -6, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 6, 2, 2)
.Finalize()
.value(),
{{{3, 4, 5}, {3 + 5, 4, 5 + 6}}});
}
TEST(TranslateToTest, OneDimensionalConstant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateTo(8),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_single_input_dimension(0, -3, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
{{{7}, {10}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimension) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, -1, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, 2 - 3, 3, 0)
.Finalize()
.value(),
{{{6}, {7}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{6}, {6}}});
}
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({4, 5})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, 4, 5, 1)
.Finalize()
.value(),
AllDims().TranslateTo({kImplicit, 10}),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, -5, 1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, -25 + 4, 5, 1)
.Finalize()
.value(),
{{{6, 7}, {6, 12}}});
}
TEST(TranslateToTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateTo(1),
absl::StatusCode::kInvalidArgument,
"Interval \\(-inf, \\+inf\\) is not bounded below");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateTo(std::numeric_limits<Index>::max()),
absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*");
}
TEST(TranslateToTest, IndexDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain,
IndexDomainBuilder<3>().origin({1, 2, 3}).shape({6, 7, 8}).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto translated_domain,
IndexDomainBuilder<3>().origin({4, 5, 6}).shape({6, 7, 8}).Finalize());
EXPECT_THAT(domain | AllDims().TranslateTo({4, 5, 6}),
::testing::Optional(translated_domain));
}
// Translating a full transform whose output map has a near-max offset/stride
// must fail (the output-offset adjustment overflows), but translating just
// the domain must succeed because no output maps need adjusting.
TEST(TranslateToTest, IndexDomainOverflow) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      IndexTransformBuilder(1, 1)
          .input_shape({10})
          .output_single_input_dimension(0, kMaxFiniteIndex, kMaxFiniteIndex, 0)
          .Finalize());
  auto domain = transform.domain();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto translated_domain,
      IndexDomainBuilder(1).origin({-5}).shape({10}).Finalize());
  EXPECT_THAT(transform | AllDims().TranslateTo({-5}),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(domain | AllDims().TranslateTo({-5}),
              ::testing::Optional(translated_domain));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/translate_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/translate_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4de049ec-a935-430b-bf68-3d954c1ba46f | cpp | google/tensorstore | interval_slice_op | tensorstore/index_space/internal/interval_slice_op.cc | tensorstore/index_space/interval_slice_op_test.cc | #include "tensorstore/index_space/internal/interval_slice_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Per-input-dimension result of an interval-slice operation: the mapping from
// the new (sliced) index to the original index is
// `original = offset + stride * new_index`.
struct InputDimensionIntervalSliceInfo {
  // Additive offset relating sliced indices to original indices.
  Index offset;
  // Stride of the slice; 1 for unsliced dimensions.
  Index stride;
};
/// Computes the new input domains for an interval-slice operation and fills
/// `dimension_info` with the per-dimension offset/stride needed to remap the
/// output index maps afterwards. Mutates the input dimensions of `transform`
/// in place.
///
/// \param dimension_info[out] One entry per input dimension of `transform`;
///     unselected dimensions get `{offset=0, stride=1}`.
/// \param transform Non-null transform representation whose input domains are
///     updated.
/// \param dimensions Selected input dimensions, parallel to the index
///     vectors.
/// \param interval_form Whether `stop_or_size_vector` holds sized, closed, or
///     half-open bounds.
/// \param translate If true, sliced dimensions are translated to origin 0.
/// \param start_vector / stop_or_size_vector / stride_vector Per-selected-
///     dimension slice parameters (scalar or vector, broadcast by the view).
/// \returns `absl::OkStatus()` on success; otherwise the slice-computation
///     error annotated with the offending input dimension.
absl::Status GetIntervalSliceInfo(
    span<InputDimensionIntervalSliceInfo> dimension_info,
    TransformRep* transform, span<const DimensionIndex> dimensions,
    IntervalForm interval_form, bool translate,
    IndexVectorOrScalarView start_vector,
    IndexVectorOrScalarView stop_or_size_vector,
    IndexVectorOrScalarView stride_vector) {
  const DimensionIndex input_rank = dimension_info.size();
  assert(input_rank == transform->input_rank);
  // Default: identity remapping for dimensions not being sliced.
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    dimension_info[input_dim] = InputDimensionIntervalSliceInfo{0, 1};
  }
  // Slices one selected dimension: computes the new domain (and the
  // offset/stride remapping) and writes it back into `transform`.
  auto compute_input_domain_slice = [&](DimensionIndex i,
                                        DimensionIndex input_dim) {
    const Index stride = stride_vector[i];
    const InputDimensionRef d = transform->input_dimension(input_dim);
    auto& info = dimension_info[input_dim];
    info.stride = stride;
    OptionallyImplicitIndexInterval new_domain;
    // `translate ? 0 : kImplicit`: when translating, force the new origin to
    // 0; otherwise let ComputeStridedSliceMap choose it.
    TENSORSTORE_RETURN_IF_ERROR(ComputeStridedSliceMap(
        d.optionally_implicit_domain(), interval_form,
        translate ? 0 : kImplicit, start_vector[i], stop_or_size_vector[i],
        stride, &new_domain, &info.offset));
    d.domain() = new_domain.interval();
    d.implicit_lower_bound() = new_domain.implicit_lower();
    d.implicit_upper_bound() = new_domain.implicit_upper();
    return absl::OkStatus();
  };
  for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
    const DimensionIndex input_dim = dimensions[i];
    TENSORSTORE_RETURN_IF_ERROR(
        compute_input_domain_slice(i, input_dim),
        MaybeAnnotateStatus(
            _,
            tensorstore::StrCat("Computing interval slice for input dimension ",
                                input_dim)));
  }
  return absl::OkStatus();
}
// Composes the per-input-dimension affine maps in `input_dimension_info`
// (as produced by `GetIntervalSliceInfo`) into the output index maps of
// `rep`, so that the transform's outputs are unchanged relative to the
// original (unsliced) input indices.
//
// \param rep Non-null transform whose output index maps are updated in
//     place; its input domains must already reflect the new (sliced)
//     domains.
// \param input_dimension_info One affine map per input dimension.
// \returns `absl::OkStatus()` on success, or `absl::InvalidArgumentError`
//     if composing an offset or stride overflows `Index`.
absl::Status ApplyOffsetsAndStridesToOutputIndexMaps(
    TransformRep* rep,
    span<const InputDimensionIntervalSliceInfo> input_dimension_info) {
  const DimensionIndex input_rank = input_dimension_info.size();
  const DimensionIndex output_rank = rep->output_rank;
  BoxView<> input_domain = rep->input_domain(input_rank);
  const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(rep);
  span<OutputIndexMap> maps = rep->output_index_maps().first(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::constant:
        // Constant maps do not depend on input indices; nothing to do.
        break;
      case OutputIndexMethod::single_input_dimension: {
        // Compose the affine maps:
        //   new_offset = map.offset + slice.offset * map.stride
        //   new_stride = map.stride * slice.stride
        // with explicit overflow detection.
        const DimensionIndex input_dim = map.input_dimension();
        const auto& slice_info = input_dimension_info[input_dim];
        Index offset;
        if (internal::MulOverflow(slice_info.offset, map.stride(), &offset) ||
            internal::AddOverflow(offset, map.offset(), &map.offset())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing offset for output dimension ",
              output_dim));
        }
        if (internal::MulOverflow(slice_info.stride, map.stride(),
                                  &map.stride())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing stride for output dimension ",
              output_dim));
        }
        break;
      }
      case OutputIndexMethod::array: {
        if (domain_is_explicitly_empty) {
          // No elements exist; the index array is irrelevant and can be
          // replaced with a trivial constant map.
          map.SetConstant();
          map.offset() = 0;
          map.stride() = 0;
          break;
        }
        auto& index_array_data = map.index_array_data();
        Index element_pointer_byte_offset = 0;
        bool array_is_singleton = true;
        // Fold each dimension's slice offset into the element pointer and
        // scale its byte stride by the slice stride.  Wrap-on-overflow
        // arithmetic is safe here because the resulting addresses are only
        // dereferenced within the (validated) new domain.
        for (DimensionIndex input_dim = 0; input_dim < input_rank;
             ++input_dim) {
          const auto& slice_info = input_dimension_info[input_dim];
          Index& byte_stride = index_array_data.byte_strides[input_dim];
          element_pointer_byte_offset = internal::wrap_on_overflow::Add(
              element_pointer_byte_offset, internal::wrap_on_overflow::Multiply(
                                               byte_stride, slice_info.offset));
          byte_stride = internal::wrap_on_overflow::Multiply(byte_stride,
                                                             slice_info.stride);
          if (input_domain.shape()[input_dim] == 1) {
            // A size-1 dimension contributes a fixed index; bake it into
            // the pointer offset and zero the stride.
            element_pointer_byte_offset = internal::wrap_on_overflow::Add(
                element_pointer_byte_offset,
                internal::wrap_on_overflow::Multiply(
                    byte_stride, input_domain.origin()[input_dim]));
            byte_stride = 0;
          } else if (byte_stride != 0) {
            array_is_singleton = false;
          }
        }
        index_array_data.element_pointer =
            AddByteOffset(std::move(index_array_data.element_pointer),
                          element_pointer_byte_offset);
        if (array_is_singleton) {
          // All strides are zero, so the array holds a single value: demote
          // the map to a (bounds-checked) constant.
          const Index index = *index_array_data.array_view(input_domain)
                                   .byte_strided_origin_pointer();
          const IndexInterval index_range = index_array_data.index_range;
          map.SetConstant();
          TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
              index, index_range, &map.offset(), &map.stride()));
        }
        break;
      }
    }
  }
  internal_index_space::DebugCheckInvariants(rep);
  return absl::OkStatus();
}
}
// Implements the interval-slice dimension expression operations
// (`ClosedInterval`, `HalfOpenInterval`, `SizedInterval` and their
// translating variants).
//
// Validates that each index vector is a scalar or has one element per
// selected dimension, slices the selected input domains, and rewrites the
// output index maps to compensate, so outputs are unchanged with respect to
// original input positions.
Result<IndexTransform<>> ApplyIntervalSliceOp(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    IntervalForm interval_form, bool translate,
    IndexVectorOrScalarView start_vector,
    IndexVectorOrScalarView stop_or_size_vector,
    IndexVectorOrScalarView stride_vector, bool domain_only) {
  const DimensionIndex num_selected_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  // Each parameter vector must be broadcastable to the selection.
  TENSORSTORE_RETURN_IF_ERROR(
      CheckIndexVectorSize(start_vector, num_selected_dims));
  TENSORSTORE_RETURN_IF_ERROR(
      CheckIndexVectorSize(stop_or_size_vector, num_selected_dims));
  TENSORSTORE_RETURN_IF_ERROR(
      CheckIndexVectorSize(stride_vector, num_selected_dims));
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  // Per-input-dimension affine maps produced by slicing the domains.
  InputDimensionIntervalSliceInfo slice_info[kMaxRank];
  const auto slice_info_span = span(slice_info).first(input_rank);
  TENSORSTORE_RETURN_IF_ERROR(GetIntervalSliceInfo(
      slice_info_span, rep.get(), *dimensions, interval_form, translate,
      start_vector, stop_or_size_vector, stride_vector));
  TENSORSTORE_RETURN_IF_ERROR(
      ApplyOffsetsAndStridesToOutputIndexMaps(rep.get(), slice_info_span));
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
// Implements the `Stride` dimension expression operation.
//
// For each selected input dimension, composes the map
// `orig_index = stride * new_index` with the transform: the input domain is
// divided by the stride and the output index maps are scaled to compensate.
Result<IndexTransform<>> ApplyStrideOp(IndexTransform<> transform,
                                       DimensionIndexBuffer* dimensions,
                                       IndexVectorOrScalarView strides,
                                       bool domain_only) {
  const DimensionIndex num_selected_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  TENSORSTORE_RETURN_IF_ERROR(
      CheckIndexVectorSize(strides, num_selected_dims));
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  // Start with identity maps (offset 0, stride 1) for every input
  // dimension; only selected dimensions are modified below.
  InputDimensionIntervalSliceInfo dim_slice_info[kMaxRank];
  std::fill_n(&dim_slice_info[0], input_rank,
              InputDimensionIntervalSliceInfo{0, 1});
  // Applies the stride at selection index `i` to input dimension
  // `input_dim`, updating both `dim_slice_info` and the input domain.
  const auto apply_stride_to_dim = [&](DimensionIndex i,
                                       DimensionIndex input_dim) {
    const Index stride = strides[i];
    if (stride == 0) {
      return absl::InvalidArgumentError("Stride must be non-zero");
    }
    dim_slice_info[input_dim].stride = stride;
    const InputDimensionRef d = rep->input_dimension(input_dim);
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto new_domain,
        GetAffineTransformDomain(d.optionally_implicit_domain(), 0,
                                 stride));
    d.domain() = new_domain.interval();
    d.implicit_lower_bound() = new_domain.implicit_lower();
    d.implicit_upper_bound() = new_domain.implicit_upper();
    return absl::OkStatus();
  };
  for (DimensionIndex i = 0; i < num_selected_dims; ++i) {
    const DimensionIndex input_dim = (*dimensions)[i];
    // Annotate any failure with the offending input dimension.
    TENSORSTORE_RETURN_IF_ERROR(
        apply_stride_to_dim(i, input_dim),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Applying stride to input dimension ",
                                   input_dim)));
  }
  TENSORSTORE_RETURN_IF_ERROR(ApplyOffsetsAndStridesToOutputIndexMaps(
      rep.get(), span(dim_slice_info).first(input_rank)));
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::BoxView;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::StrCat;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
// Verifies the `ClosedInterval` documentation example: slicing dimensions 0
// and 2 (selected both by index and by label), with a reversing stride of
// -2 on "z".
TEST(ClosedIntervalTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_shape({7, 4, 10})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({1, 2, -4})
          .input_shape({4, 4, 3})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 0, -2, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 6}, {2, 3, -3}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).ClosedInterval({1, 8}, {4, 3}, {1, -2}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
  // Selecting the same dimensions by label must produce the same result.
  TestDimExpression(
      original_transform,
      Dims("x", "z").ClosedInterval({1, 8}, {4, 3}, {1, -2}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
}
// Like `Example`, but with a stop bound of 9 on "z" so the composed output
// map for "z" acquires a nonzero offset.
TEST(ClosedIntervalTest, ExampleWithOffset) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_shape({7, 4, 10})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({1, 2, -4})
          .input_shape({4, 4, 4})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 1, -2, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 7}, {2, 3, -3}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).ClosedInterval({1, 9}, {4, 3}, {1, -2}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
}
// Verifies the `HalfOpenInterval` documentation example (exclusive stop
// bounds).
TEST(HalfOpenIntervalTest, Example) {
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({1, 2, -4})
          .input_shape({3, 4, 3})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 0, -2, 2)
          .Finalize()
          .value();
  TestDimExpression(
      IndexTransformBuilder<3, 3>()
          .input_origin({0, 2, 0})
          .input_shape({7, 4, 10})
          .input_labels({"x", "y", "z"})
          .output_identity_transform()
          .Finalize()
          .value(),
      Dims(0, 2).HalfOpenInterval({1, 8}, {4, 3}, {1, -2}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      {{{2, 3, 6}, {2, 3, -3}}});
}
// Verifies the `SizedInterval` documentation example (start + size bounds).
TEST(SizedIntervalTest, Example) {
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({1, 2, -4})
          .input_shape({3, 4, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 0, -2, 2)
          .Finalize()
          .value();
  TestDimExpression(
      IndexTransformBuilder<3, 3>()
          .input_origin({0, 2, 0})
          .input_shape({7, 4, 10})
          .input_labels({"x", "y", "z"})
          .output_identity_transform()
          .Finalize()
          .value(),
      Dims(0, 2).SizedInterval({1, 8}, {3, 2}, {1, -2}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      {{{2, 3, 6}, {2, 3, -3}}});
}
// Slicing a constant output map with unit stride leaves the map unchanged.
TEST(ClosedIntervalTest, OneDimensionalConstantNonStrided) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({-5})
                        .input_shape({10})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    AllDims().ClosedInterval(-3, 4),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-3})
                        .input_shape({8})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-3})
                        .input_shape({8})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    {{{1}, {1}}});
}
// A positive stride of 2 divides the domain and leaves the constant output
// map unchanged.
TEST(ClosedIntervalTest, OneDimensionalConstantPositiveStrided) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({-5})
                        .input_shape({12})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    AllDims().ClosedInterval(-3, 5, 2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-1})
                        .input_shape({5})
                        .output_single_input_dimension(0, -1, 2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-1})
                        .input_shape({5})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    {{{1}, {1}}});
}
// A negative stride reverses the domain; the constant map is unaffected.
TEST(ClosedIntervalTest, OneDimensionalConstantNegativeStrided) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({-5})
                        .input_shape({12})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    AllDims().ClosedInterval(5, -3, -2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-2})
                        .input_shape({5})
                        .output_single_input_dimension(0, 1, -2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-2})
                        .input_shape({5})
                        .output_constant(0, 3)
                        .Finalize()
                        .value(),
                    {{{1}, {1}}});
}
// Unit-stride slicing of a single_input_dimension map preserves its
// offset/stride.
TEST(ClosedIntervalTest, OneDimensionalSingleInputDimensionNonStrided) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({-5})
                        .input_shape({10})
                        .output_single_input_dimension(0, 3, 2, 0)
                        .Finalize()
                        .value(),
                    AllDims().ClosedInterval(-3, 4),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-3})
                        .input_shape({8})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-3})
                        .input_shape({8})
                        .output_single_input_dimension(0, 3, 2, 0)
                        .Finalize()
                        .value(),
                    {{{1}, {1}}});
}
// Strided slicing of a single_input_dimension map composes the affine maps:
// offset 3 + (-1 * 2) = 1, stride 2 * 2 = 4.
TEST(ClosedIntervalTest, OneDimensionalSingleInputDimensionPositiveStrided) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({-5})
                        .input_shape({12})
                        .output_single_input_dimension(0, 3, 2, 0)
                        .Finalize()
                        .value(),
                    AllDims().ClosedInterval(-3, 5, 2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-1})
                        .input_shape({5})
                        .output_single_input_dimension(0, -1, 2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-1})
                        .input_shape({5})
                        .output_single_input_dimension(0, 1, 4, 0)
                        .Finalize()
                        .value(),
                    {{{-3}, {-1}}, {{-1}, {0}}});
}
// Slicing an index-array output map restricts the array to the new domain.
TEST(ClosedIntervalTest, OneDimensionalArrayNonStrided) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
          .Finalize()
          .value(),
      AllDims().ClosedInterval(-1, 1),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({3})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({3})
          .output_index_array(0, 3, 2, MakeArray<Index>({5, 4, 3}))
          .Finalize()
          .value(),
      {{{1}, {1}}});
}
// An explicitly empty result domain demotes the index-array map to a
// zero constant.
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedZeroElements) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
          .Finalize()
          .value(),
      AllDims().ClosedInterval(-1, -2),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({0})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({0})
          .output_constant(0, 0)
          .Finalize()
          .value(),
      {});
}
// A singleton result domain demotes the index-array map to a constant:
// 3 + 2 * array[-1] = 3 + 2 * 5 = 13.
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedOneElement) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
          .Finalize()
          .value(),
      AllDims().ClosedInterval(-1, -1),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({1})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({-1})
          .input_shape({1})
          .output_constant(0, 13)
          .Finalize()
          .value(),
      {{{-1}, {-1}}});
}
// Demoting to a constant still validates the array value against its
// declared index_range.
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedInvalidOneElement) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}),
                              IndexInterval::Closed(3, 4))
          .Finalize()
          .value(),
      AllDims().ClosedInterval(-1, -1), absl::StatusCode::kOutOfRange,
      "Index 5 is outside valid range \\[3, 5\\)");
}
// TranslateClosedInterval shifts the new origin to 0; the index array is
// restricted accordingly.
TEST(SliceTranslateClosedIntervalTest, OneDimensionalArrayNonStrided) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
          .Finalize()
          .value(),
      AllDims().TranslateClosedInterval(-1, 1),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({3})
          .output_single_input_dimension(0, -1, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({3})
          .output_index_array(0, 3, 2, MakeArray<Index>({5, 4, 3}))
          .Finalize()
          .value(),
      {{{1}, {2}}});
}
// Translated + strided slicing subsamples the index array.
TEST(SliceTranslateClosedIntervalTest, OneDimensionalArrayStrided) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({4})
          .output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
          .Finalize()
          .value(),
      AllDims().TranslateClosedInterval(-1, 1, 2),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({2})
          .output_single_input_dimension(0, -1, 2, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({0})
          .input_shape({2})
          .output_index_array(0, 3, 2, MakeArray<Index>({5, 3}))
          .Finalize()
          .value(),
      {{{-1}, {0}}});
}
// Slices a subset of dimensions of a rank-4 transform (including infinite
// bounds and an index-array map), with unit strides.
TEST(ClosedIntervalTest, DimSubset) {
  TestDimExpression(
      IndexTransformBuilder<4, 4>()
          .input_origin({-10, 1, 2, -kInfIndex})
          .input_shape({kInfIndex + 1, 4, 5, kInfIndex + 7})
          .output_single_input_dimension(0, 1, 4, 1)
          .output_single_input_dimension(1, 2, 3, 3)
          .output_constant(2, 3)
          .output_index_array(
              3, 4, 1,
              MakeArray<Index>(
                  {{{{5}, {6}, {7}, {8}, {9}},
                    {{15}, {16}, {17}, {18}, {19}},
                    {{25}, {26}, {27}, {28}, {29}},
                    {{35}, {36}, {37}, {38}, {39}}}}))
          .Finalize()
          .value(),
      Dims(1, 2, 0).ClosedInterval({2, 2, -5}, {3, 4, 10}),
      {1, 2, 0},
      IndexTransformBuilder<4, 4>()
          .input_origin({-5, 2, 2, -kInfIndex})
          .input_shape({16, 2, 3, kInfIndex + 7})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 4>()
          .input_origin({-5, 2, 2, -kInfIndex})
          .input_shape({16, 2, 3, kInfIndex + 7})
          .output_single_input_dimension(0, 1, 4, 1)
          .output_single_input_dimension(1, 2, 3, 3)
          .output_constant(2, 3)
          .output_index_array(
              3, 4, 1,
              MakeArray<Index>(
                  {{{{15}, {16}, {17}}, {{25}, {26}, {27}}}}))
          .Finalize()
          .value(),
      {{{1, 2, 3, 4}, {1, 2, 3, 4}}});
}
// Mixed positive/negative strides across four dimensions (one bound left
// implicit via kImplicit); verifies composed affine maps and the
// subsampled/reversed index array.
TEST(SliceClosedIntervalTest, DimSubsetStriding) {
  TestDimExpression(
      IndexTransformBuilder<4, 4>()
          .input_origin({-10, 1, 2, -kInfIndex})
          .input_shape({kInfIndex + 1, 4, 5, kInfIndex + 7})
          .output_single_input_dimension(0, 1, 4, 1)
          .output_single_input_dimension(1, 2, 3, 3)
          .output_constant(2, 3)
          .output_index_array(
              3, 4, 1,
              MakeArray<Index>(
                  {{{{5}, {6}, {7}, {8}, {9}},
                    {{15}, {16}, {17}, {18}, {19}},
                    {{25}, {26}, {27}, {28}, {29}},
                    {{35}, {36}, {37}, {38}, {39}}}}))
          .Finalize()
          .value(),
      Dims(1, 2, 0, 3)
          .ClosedInterval({3, 2, 10, 1}, {2, 4, -5, kImplicit},
                          {-1, 2, -2, 4}),
      {1, 2, 0, 3},
      IndexTransformBuilder<4, 4>()
          .input_origin({-5, -3, 1, 0})
          .input_shape({8, 2, 2, 2})
          .output_single_input_dimension(0, 0, -2, 0)
          .output_single_input_dimension(1, 0, -1, 1)
          .output_single_input_dimension(2, 0, 2, 2)
          .output_single_input_dimension(3, 1, 4, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 4>()
          .input_origin({-5, -3, 1, 0})
          .input_shape({8, 2, 2, 2})
          .output_single_input_dimension(0, 1, -4, 1)
          .output_single_input_dimension(1, 5, 12, 3)
          .output_constant(2, 3)
          .output_index_array(
              3, 4, 1,
              MakeArray<Index>({{{{25}, {27}}, {{15}, {17}}}}))
          .Finalize()
          .value(),
      {{{2, 2, 2, 5}, {-1, -2, 1, 1}}});
}
// kImplicit as the start bound means "keep the existing lower bound".
TEST(SliceClosedIntervalTest, UnboundedStart) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).ClosedInterval(kImplicit, 9),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({5})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({5})
                        .Finalize()
                        .value(),
                    {});
}
// With a negative stride, an implicit stop bound extends to the existing
// lower bound of the domain.
TEST(SliceClosedIntervalTest, OneDimensionalNegativeStridedUnboundedStop) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).ClosedInterval(12, kImplicit, -1),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-12})
                        .input_shape({8})
                        .output_single_input_dimension(0, 0, -1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({-12})
                        .input_shape({8})
                        .Finalize()
                        .value(),
                    {});
}
// Basic half-open interval with unit stride.
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstrided) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).HalfOpenInterval(6, 10),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({6})
                        .input_shape({4})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({6})
                        .input_shape({4})
                        .Finalize()
                        .value(),
                    {});
}
// Half-open interval with an implicit start bound.
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstridedUnboundedStart) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).HalfOpenInterval(kImplicit, 10),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({5})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({5})
                        .Finalize()
                        .value(),
                    {});
}
// Half-open interval with an implicit stop bound.
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstridedUnboundedStop) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).HalfOpenInterval(6, kImplicit),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({6})
                        .input_shape({9})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({6})
                        .input_shape({9})
                        .Finalize()
                        .value(),
                    {});
}
// Half-open interval, reversed by a negative stride with an implicit stop.
TEST(SliceHalfOpenIntervalTest, OneDimensionalNegativeStridedUnboundedStop) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).HalfOpenInterval(12, kImplicit, -1),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-12})
                        .input_shape({8})
                        .output_single_input_dimension(0, 0, -1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({-12})
                        .input_shape({8})
                        .Finalize()
                        .value(),
                    {});
}
// A half-open stop of Index::min()+1 underflows when converted to a closed
// bound.
TEST(SliceHalfOpenIntervalTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>().Finalize().value(),
      Dims(0).HalfOpenInterval(6, std::numeric_limits<Index>::min() + 1),
      absl::StatusCode::kInvalidArgument,
      StrCat(".* do not specify a valid closed index interval"));
}
// Error cases for ClosedInterval: zero stride, inverted bounds, infinite
// start with non-unit stride, out-of-domain interval, and integer overflow
// when composing output offsets/strides.
TEST(SliceClosedIntervalTest, ErrorHandling) {
  TestDimExpressionError(IndexTransformBuilder<1, 0>()
                             .input_origin({5})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         Dims(0).ClosedInterval(6, 10, 0),
                         absl::StatusCode::kInvalidArgument,
                         ".*Invalid stride 0");
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>()
          .input_origin({5})
          .input_shape({10})
          .Finalize()
          .value(),
      Dims(0).ClosedInterval(6, 4), absl::StatusCode::kInvalidArgument,
      ".*\\(6, 4\\) do not specify a valid closed index interval");
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>().input_shape({10}).Finalize().value(),
      Dims(0).ClosedInterval(-kInfIndex, 4, 2),
      absl::StatusCode::kInvalidArgument,
      ".*Slicing with non-unit stride of 2 requires a finite start index");
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>()
          .input_origin({2})
          .input_shape({kInfIndex - 2 + 1})
          .Finalize()
          .value(),
      Dims(0).ClosedInterval(kInfIndex, 4, -2),
      absl::StatusCode::kInvalidArgument,
      ".*Slicing with non-unit stride of -2 requires a finite start index");
  TestDimExpressionError(IndexTransformBuilder<1, 0>()
                             .input_origin({5})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         Dims(0).ClosedInterval(6, 15),
                         absl::StatusCode::kOutOfRange,
                         ".*Slice interval \\[6, 16\\) is not "
                         "contained within domain \\[5, 15\\)");
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({5})
          .input_shape({10})
          .output_single_input_dimension(0, 0,
                                         std::numeric_limits<Index>::max(), 0)
          .Finalize()
          .value(),
      Dims(0).ClosedInterval(5, 10, 3), absl::StatusCode::kInvalidArgument,
      "Integer overflow computing offset for output dimension 0")
;
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({5})
          .input_shape({10})
          .output_single_input_dimension(0, std::numeric_limits<Index>::max(),
                                         1, 0)
          .Finalize()
          .value(),
      Dims(0).ClosedInterval(5, 10, 3), absl::StatusCode::kInvalidArgument,
      "Integer overflow computing offset for output dimension 0");
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({5})
          .input_shape({10})
          .output_single_input_dimension(0, 0,
                                         std::numeric_limits<Index>::max(), 0)
          .Finalize()
          .value(),
      Dims(0).ClosedInterval(5, 10, 2), absl::StatusCode::kInvalidArgument,
      "Integer overflow computing stride for output dimension 0");
}
// TranslateClosedInterval requires a finite lower bound (the result origin
// is shifted to 0).
TEST(SliceTranslateClosedIntervalTest, ErrorHandling) {
  TestDimExpressionError(IndexTransformBuilder<1, 0>().Finalize().value(),
                         Dims(0).TranslateClosedInterval(-kInfIndex, 100),
                         absl::StatusCode::kInvalidArgument,
                         ".*Interval \\(-inf, 101\\) is not bounded below");
}
// SizedInterval rejects negative sizes and sizes that overflow the stop
// bound computation.
TEST(SliceSizedIntervalTest, ErrorHandling) {
  TestDimExpressionError(IndexTransformBuilder<1, 0>()
                             .input_origin({5})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         Dims(0).SizedInterval(6, -2),
                         absl::StatusCode::kInvalidArgument,
                         ".*Negative size -2 specified for sized interval");
  TestDimExpressionError(IndexTransformBuilder<1, 0>().Finalize().value(),
                         Dims(0).SizedInterval(6, kInfIndex - 1, 100),
                         absl::StatusCode::kOutOfRange,
                         ".*Integer overflow computing slice result");
  TestDimExpressionError(IndexTransformBuilder<1, 0>().Finalize().value(),
                         Dims(0).SizedInterval(6, kInfSize - 1),
                         absl::StatusCode::kOutOfRange,
                         ".*Integer overflow computing slice result");
}
// Basic sized interval: start 6, size 3, unit stride.
TEST(SliceSizedIntervalTest, OneDimensionalUnstrided) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(6, 3),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({6})
                        .input_shape({3})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({6})
                        .input_shape({3})
                        .Finalize()
                        .value(),
                    {});
}
// Implicit start defaults to the existing lower bound.
TEST(SliceSizedIntervalTest, OneDimensionalUnstridedUnboundedMin) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(kImplicit, 3),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({3})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({3})
                        .Finalize()
                        .value(),
                    {});
}
// Implicit size extends to the existing upper bound.
TEST(SliceSizedIntervalTest, OneDimensionalUnstridedUnboundedSize) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(6, kImplicit),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({6})
                        .input_shape({9})
                        .output_identity_transform()
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({6})
                        .input_shape({9})
                        .Finalize()
                        .value(),
                    {});
}
// Sized interval with a positive stride of 2 (size counts sampled points).
TEST(SliceSizedIntervalTest, OneDimensionalPositiveStrided) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(7, 3, 2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({3})
                        .input_shape({3})
                        .output_single_input_dimension(0, 1, 2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({3})
                        .input_shape({3})
                        .Finalize()
                        .value(),
                    {});
}
// Implicit size with stride 2: the number of sampled points depends on
// whether the start index is aligned with the domain bounds.
TEST(SliceSizedIntervalTest, OneDimensionalPositiveStridedUnboundedSize) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(6, kImplicit, 2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({3})
                        .input_shape({5})
                        .output_single_input_dimension(0, 0, 2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({3})
                        .input_shape({5})
                        .Finalize()
                        .value(),
                    {});
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(7, kImplicit, 2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({3})
                        .input_shape({4})
                        .output_single_input_dimension(0, 1, 2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({3})
                        .input_shape({4})
                        .Finalize()
                        .value(),
                    {});
}
// Sized interval with a negative stride (samples downward from the start).
TEST(SliceSizedIntervalTest, OneDimensionalNegativeStrided) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(13, 3, -2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-6})
                        .input_shape({3})
                        .output_single_input_dimension(0, 1, -2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({-6})
                        .input_shape({3})
                        .Finalize()
                        .value(),
                    {});
}
// Negative stride with implicit size extends down to the domain's lower
// bound.
TEST(SliceSizedIntervalTest, OneDimensionalNegativeStridedUnboundedSize) {
  TestDimExpression(IndexTransformBuilder<1, 0>()
                        .input_origin({5})
                        .input_shape({10})
                        .Finalize()
                        .value(),
                    Dims(0).SizedInterval(13, kImplicit, -2),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({-6})
                        .input_shape({5})
                        .output_single_input_dimension(0, 1, -2, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 0>()
                        .input_origin({-6})
                        .input_shape({5})
                        .Finalize()
                        .value(),
                    {});
}
// Verifies the `Stride` documentation example: strides -2 and 3 applied to
// dimensions selected by index and by label.
TEST(StrideTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 1})
                                      .input_inclusive_max({6, 5, 8})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({-3, 2, 1})
          .input_inclusive_max({0, 5, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 0, -2, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 0, 3, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{4, 3, 3}, {-2, 3, 1}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).Stride({-2, 3}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
  // Selecting the same dimensions by label must produce the same result.
  TestDimExpression(
      original_transform,
      Dims("x", "z").Stride({-2, 3}),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
}
// A stride of zero is rejected, with the input dimension annotated.
TEST(StrideTest, ErrorZeroStride) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 0>().Finalize().value(), Dims(0).Stride(0),
      absl::StatusCode::kInvalidArgument,
      StrCat("Applying stride to input dimension 0: Stride must be non-zero"));
}
// min * min overflows when composing the output stride.
TEST(StrideTest, ErrorStrideOverflow) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 0,
                                         std::numeric_limits<Index>::min(), 0)
          .Finalize()
          .value(),
      Dims(0).Stride(std::numeric_limits<Index>::min()),
      absl::StatusCode::kInvalidArgument,
      StrCat("Integer overflow computing stride for output dimension 0"));
}
// Verifies the `BoxSlice` documentation example (slice by origin/shape box,
// keeping original coordinates).
TEST(BoxSliceTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_inclusive_max({6, 5, 9})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform = IndexTransformBuilder<3, 3>()
                                          .input_origin({1, 2, 4})
                                          .input_inclusive_max({3, 5, 7})
                                          .input_labels({"x", "y", "z"})
                                          .output_identity_transform()
                                          .Finalize()
                                          .value();
  const EquivalentIndices equivalent_indices = {
      {{1, 3, 4}, {1, 3, 4}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).BoxSlice(BoxView({1, 4}, {3, 4})),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
  // Selecting the same dimensions by label must produce the same result.
  TestDimExpression(
      original_transform,
      Dims("x", "z").BoxSlice(BoxView({1, 4}, {3, 4})),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
}
// Verifies the `TranslateBoxSlice` documentation example (slice by box,
// translating each sliced dimension's origin to 0).
TEST(TranslateBoxSliceTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({0, 2, 0})
                                      .input_inclusive_max({6, 5, 9})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({0, 2, 0})
          .input_inclusive_max({2, 5, 3})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 1, 1, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 4, 1, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{1, 3, 4}, {0, 3, 0}},
  };
  TestDimExpression(
      original_transform,
      Dims(0, 2).TranslateBoxSlice(BoxView({1, 4}, {3, 4})),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
  // Selecting the same dimensions by label must produce the same result.
  TestDimExpression(
      original_transform,
      Dims("x", "z").TranslateBoxSlice(BoxView({1, 4}, {3, 4})),
      {0, 2},
      expected_new_transform,
      expected_new_transform,
      equivalent_indices);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/interval_slice_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/interval_slice_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d8944e80-26e6-4dc5-8df1-c051bf2b5ecb | cpp | google/tensorstore | add_new_dims_op | tensorstore/index_space/internal/add_new_dims_op.cc | tensorstore/index_space/add_new_dims_op_test.cc | #include "tensorstore/index_space/internal/add_new_dims_op.h"
#include <cassert>
#include <utility>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
void AddNewDims(TransformRep* original, TransformRep* result,
DimensionIndexBuffer* dimensions, bool domain_only) {
const DimensionIndex orig_input_rank = original->input_rank;
const DimensionIndex new_input_rank = orig_input_rank + dimensions->size();
assert(result->input_rank_capacity >= new_input_rank);
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
assert(result->output_rank_capacity >= output_rank);
DimensionSet newly_added_input_dims;
for (DimensionIndex new_input_dim : *dimensions) {
newly_added_input_dims[new_input_dim] = true;
}
DimensionIndex orig_to_new_input_dim[kMaxRank];
for (DimensionIndex new_input_dim = 0, orig_input_dim = 0;
new_input_dim < new_input_rank; ++new_input_dim) {
if (newly_added_input_dims[new_input_dim]) continue;
orig_to_new_input_dim[orig_input_dim] = new_input_dim;
++orig_input_dim;
}
span<const OutputIndexMap> orig_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& orig_map = orig_maps[output_dim];
auto& result_map = result_maps[output_dim];
result_map.stride() = orig_map.stride();
result_map.offset() = orig_map.offset();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
const auto& orig_index_array = orig_map.index_array_data();
for (DimensionIndex orig_input_dim = orig_input_rank - 1;
orig_input_dim >= 0; --orig_input_dim) {
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
assert(new_input_dim >= orig_input_dim);
result_index_array.byte_strides[new_input_dim] =
orig_index_array.byte_strides[orig_input_dim];
}
for (const DimensionIndex new_input_dim : *dimensions) {
result_index_array.byte_strides[new_input_dim] = 0;
}
result_index_array.index_range = orig_index_array.index_range;
result_index_array.element_pointer = orig_index_array.element_pointer;
break;
}
}
}
for (DimensionIndex orig_input_dim = orig_input_rank - 1; orig_input_dim >= 0;
--orig_input_dim) {
const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
result->input_dimension(new_input_dim) =
original->input_dimension(orig_input_dim);
}
for (DimensionIndex new_input_dim : *dimensions) {
const auto d = result->input_dimension(new_input_dim);
d.domain() = IndexInterval::UncheckedSized(-kInfIndex, kInfSize);
d.implicit_lower_bound() = true;
d.implicit_upper_bound() = true;
d.SetEmptyLabel();
}
result->input_rank = new_input_rank;
result->output_rank = output_rank;
}
}
Result<IndexTransform<>> ApplyAddNewDims(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
const DimensionIndex new_input_rank =
transform.input_rank() + dimensions->size();
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(new_input_rank));
auto new_rep =
NewOrMutableRep(TransformAccess::rep(transform), new_input_rank,
transform.output_rank(), domain_only);
AddNewDims(TransformAccess::rep(transform), new_rep.get(), dimensions,
domain_only);
internal_index_space::DebugCheckInvariants(new_rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(AddNewTest, Example) {
const auto expected_new_transform =
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"", "x", ""})
.output_single_input_dimension(0, 1)
.Finalize()
.value();
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 2},
expected_new_transform,
expected_new_transform,
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, Simple) {
TestDimExpression(
IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({3, 4})
.output_single_input_dimension(0, 1, 3, 1)
.output_single_input_dimension(1, 2, 4, 0)
.output_index_array(2, 3, 5,
MakeArray<Index>({{1, 2, 3, 4}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 3},
IndexTransformBuilder<4, 2>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1, 3, 2)
.output_single_input_dimension(1, 2, 4, 1)
.output_index_array(
2, 3, 5, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
{
{{3, 4}, {100, 3, 4, 500}},
{{3, 4}, {-100, 3, 4, -500}},
},
false);
}
TEST(AddNewTest, Constant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(0).AddNew(),
{0},
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{1}, {-100, 1}},
{{1}, {100, 1}},
},
false);
}
TEST(AddNewTest, Labeled) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"a"})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(-1, 0).AddNew().Label("x", "y"),
{2, 0},
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, EmptyDimensionSelection) {
const auto transform = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
TestDimExpression(
transform,
Dims().AddNew(),
{},
transform,
transform,
{
{{2}, {2}},
{{3}, {3}},
},
true);
}
TEST(AddNewTest, InvalidRank) {
TestDimExpressionError(tensorstore::IdentityTransform(31),
Dims(0, 1).AddNew(),
absl::StatusCode::kInvalidArgument,
".*Rank 33 is outside valid range \\[0, 32\\]");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/add_new_dims_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/add_new_dims_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9cffab4a-af17-4482-957f-442c9956958a | cpp | google/tensorstore | keyword_arguments | python/tensorstore/keyword_arguments.h | python/tensorstore/keyword_arguments_test.cc | #ifndef PYTHON_TENSORSTORE_KEYWORD_ARGUMENTS_H_
#define PYTHON_TENSORSTORE_KEYWORD_ARGUMENTS_H_
#include <pybind11/pybind11.h>
#include <string_view>
#include "absl/strings/ascii.h"
#include "absl/strings/str_split.h"
#include "python/tensorstore/status.h"
#include "python/tensorstore/type_name_override.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_python {
template <typename T>
struct KeywordArgumentPlaceholder {
pybind11::object value;
constexpr static auto tensorstore_pybind11_type_name_override =
pybind11::detail::_("Optional[") +
pybind11::detail::make_caster<T>::name + pybind11::detail::_("]");
};
template <typename ParamDef>
using KeywordArgument = KeywordArgumentPlaceholder<typename ParamDef::type>;
template <typename ParamDef>
void AppendKeywordArgumentDoc(std::string& doc) {
tensorstore::StrAppend(&doc, " ", ParamDef::name, ": ");
std::string_view delim = "";
for (std::string_view line :
absl::StrSplit(absl::StripAsciiWhitespace(ParamDef::doc), '\n')) {
tensorstore::StrAppend(&doc, delim, line, "\n");
delim = " ";
}
}
template <typename... ParamDef>
void AppendKeywordArgumentDocs(std::string& doc, ParamDef... params) {
(AppendKeywordArgumentDoc<ParamDef>(doc), ...);
}
template <typename ParamDef>
auto MakeKeywordArgumentPyArg(ParamDef param_def) {
return (pybind11::arg(decltype(param_def)::name) = pybind11::none());
}
template <typename ParamDef, typename Target>
void SetKeywordArgumentOrThrow(Target& target, KeywordArgument<ParamDef>& arg) {
if (arg.value.is_none()) return;
pybind11::detail::make_caster<typename ParamDef::type> caster;
if (!caster.load(arg.value, true)) {
throw pybind11::type_error(tensorstore::StrCat("Invalid ", ParamDef::name));
}
auto status = ParamDef::Apply(
target,
pybind11::detail::cast_op<typename ParamDef::type&&>(std::move(caster)));
if (!status.ok()) {
ThrowStatusException(MaybeAnnotateStatus(
status, tensorstore::StrCat("Invalid ", ParamDef::name)));
}
}
template <typename... ParamDef, typename Target>
void ApplyKeywordArguments(Target& target, KeywordArgument<ParamDef>&... arg) {
(SetKeywordArgumentOrThrow<ParamDef>(target, arg), ...);
}
}
}
#endif | #include <pybind11/pybind11.h>
#include "python/tensorstore/keyword_arguments.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_python {
namespace {
namespace py = ::pybind11;
struct MyOptions {
int a = 42;
std::string b;
};
struct MyArgA {
using type = int;
constexpr static const char* name = "a";
constexpr static const char* doc = R"(
Specifies a. This documentation string is allowed
to be more than one line.
)";
static absl::Status Apply(MyOptions& self, type value) {
if (value == 0) return absl::InvalidArgumentError("Bad");
self.a = value;
return absl::OkStatus();
}
};
struct MyArgB {
using type = std::string;
constexpr static const char* name = "b";
constexpr static const char* doc = "Specifies b.";
static absl::Status Apply(MyOptions& self, type value) {
self.b = value;
return absl::OkStatus();
}
};
constexpr auto WithMyKeywordArguments = [](auto callback) {
callback(MyArgA{}, MyArgB{});
};
[[maybe_unused]] void RegisterBindings(py::module m) {
WithMyKeywordArguments([&](auto... param_def) {
std::string doc = R"(
Does something or other with keyword arguments.
Args:
required_arg: This is required
)";
AppendKeywordArgumentDocs(doc, param_def...);
doc += R"(
Overload:
components
)";
m.def(
"myfunc",
[](int required_arg,
KeywordArgument<decltype(param_def)>... kwarg) {
MyOptions options;
ApplyKeywordArguments<decltype(param_def)...>(options, kwarg...);
return tensorstore::StrCat(options.a, ", ", options.b);
},
doc.c_str(), py::arg("required_arg"), py::kw_only(),
MakeKeywordArgumentPyArg(param_def)...);
});
}
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/python/tensorstore/keyword_arguments.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/python/tensorstore/keyword_arguments_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
34ccf62e-4ef7-427f-ac2d-6abf3f4c45d8 | cpp | google/tensorstore | data_type_conversion | tensorstore/data_type_conversion.h | tensorstore/data_type_conversion_test.cc | #ifndef TENSORSTORE_DATA_TYPE_CONVERSION_H_
#define TENSORSTORE_DATA_TYPE_CONVERSION_H_
#include <array>
#include <complex>
#include <limits>
#include <type_traits>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
template <typename From, typename To>
struct ConvertDataType {
void operator()(const From* from, To* to, void* arg) const {
*to = static_cast<To>(*from);
}
};
template <typename From, typename To>
struct DataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags = DataTypeConversionFlags{};
};
template <typename From, typename To,
DataTypeConversionFlags AdditionalFlags = DataTypeConversionFlags{}>
constexpr inline bool IsDataTypeConversionSupported =
((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported | AdditionalFlags)) ==
(DataTypeConversionFlags::kSupported | AdditionalFlags));
template <typename From, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool
IsDataTypeConversionSupported<From, void, AdditionalFlags> = true;
template <typename To, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool IsDataTypeConversionSupported<void, To, AdditionalFlags> =
true;
template <typename T, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool IsDataTypeConversionSupported<T, T, AdditionalFlags> =
true;
template <DataTypeConversionFlags AdditionalFlags>
constexpr inline bool
IsDataTypeConversionSupported<void, void, AdditionalFlags> = true;
namespace internal {
extern const std::array<DataTypeOperations::CanonicalConversionOperations,
kNumDataTypeIds>
canonical_data_type_conversions;
DataTypeConversionLookupResult GetDataTypeConverter(DataType from, DataType to);
Result<DataTypeConversionLookupResult> GetDataTypeConverterOrError(
DataType from, DataType to, DataTypeConversionFlags required_flags = {});
}
namespace internal_data_type {
template <typename From, typename To>
std::enable_if_t<((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kCanReinterpretCast)) ==
DataTypeConversionFlags::kSupported &&
!std::is_same_v<From, To>),
internal::ElementwiseFunction<2, void*>>
GetConvertFunction() {
return internal::SimpleElementwiseFunction<
ConvertDataType<From, To>(From, const To), void*>();
}
template <typename From, typename To>
std::enable_if_t<((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kCanReinterpretCast)) !=
DataTypeConversionFlags::kSupported ||
std::is_same_v<From, To>),
internal::ElementwiseFunction<2, void*>>
GetConvertFunction() {
return {};
}
template <typename From>
constexpr internal::DataTypeOperations::CanonicalConversionOperations
GetConvertToCanonicalOperations() {
return {
MapCanonicalDataTypes([](auto dtype) {
using X = typename decltype(dtype)::Element;
return GetConvertFunction<From, X>();
}),
MapCanonicalDataTypes([](auto dtype) {
using X = typename decltype(dtype)::Element;
return DataTypeConversionTraits<From, X>::flags;
}),
};
}
}
namespace internal_data_type {
template <typename From, typename To>
struct IntegerIntegerDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits &&
std::numeric_limits<From>::is_signed <=
std::numeric_limits<To>::is_signed)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{}) |
((std::numeric_limits<From>::digits +
std::numeric_limits<From>::is_signed ==
std::numeric_limits<To>::digits + std::numeric_limits<To>::is_signed)
? DataTypeConversionFlags::kCanReinterpretCast
: DataTypeConversionFlags{});
};
template <typename From, typename To>
struct IntegerFloatDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
template <typename From, typename To>
struct FloatFloatDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits &&
std::numeric_limits<From>::min_exponent >=
std::numeric_limits<To>::min_exponent &&
std::numeric_limits<From>::max_exponent <=
std::numeric_limits<To>::max_exponent)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
template <typename From, typename To>
struct NumericComplexDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionTraits<From, typename To::value_type>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit);
};
template <typename From, typename To>
struct ComplexComplexDataTypeConversionTraits
: public DataTypeConversionTraits<typename From::value_type,
typename To::value_type> {};
template <typename From, typename To>
struct IntegerJsonDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= 64)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
template <typename From, typename To>
struct FloatJsonDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionTraits<From, double>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit);
};
}
template <typename T>
struct DataTypeConversionTraits<std::complex<T>, ::tensorstore::dtypes::json_t>
: public DataTypeConversionTraits<T, ::tensorstore::dtypes::json_t> {};
#define TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(FROM, TO, ...) \
template <> \
struct DataTypeConversionTraits<FROM, TO> { \
using From = FROM; \
using To = TO; \
constexpr static DataTypeConversionFlags flags = __VA_ARGS__; \
}; \
#define TENSORSTORE_INTERNAL_INHERITED_CONVERT(FROM, TO, PARENT) \
template <> \
struct DataTypeConversionTraits<FROM, TO> : public PARENT<FROM, TO> {}; \
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::char_t, ::tensorstore::dtypes::byte_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit |
DataTypeConversionFlags::kCanReinterpretCast)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::ustring_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit |
DataTypeConversionFlags::kCanReinterpretCast)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3fn_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e5m2_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::bfloat16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::complex64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::complex128_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::json_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::byte_t, ::tensorstore::dtypes::char_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::ustring_t, ::tensorstore::dtypes::json_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
// ===========================================================================
// Conversion-traits table (generated-style; one macro invocation per
// (from, to) dtype pair).  TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS
// presumably expands to a DataTypeConversionTraits specialization tagging the
// pair with the given DataTypeConversionFlags -- the macro is defined earlier
// in this file, outside this view; verify there for the exact expansion.
// Every pair in this section is plain kSupported (lossy / value-checked
// conversion, no kSafeAndImplicit bit).
// ===========================================================================
// float8_e4m3fnuz_t -> 32/64-bit integers and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// float8_e4m3b11fnuz_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::uint16_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::uint32_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::uint64_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::string_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::ustring_t, DataTypeConversionFlags::kSupported)
// float8_e5m2_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// float8_e5m2fnuz_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// ===========================================================================
// Standard floating-point source types (float16/bfloat16/float32/float64)
// -> bool, integers, and string types; every pair kSupported (no
// kSafeAndImplicit -- these conversions can lose information or require
// value checks).
// ===========================================================================
// float16_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// bfloat16_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// float32_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// float64_t -> bool, integers, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// ===========================================================================
// complex64/complex128 -> integers, float8 variants, real floats, and
// strings; json_t -> numeric/float/string types; string_t -> ustring/json.
// All pairs kSupported only.  Note: complex sources have no bool_t target in
// this table (unlike the real float sources above).
// ===========================================================================
// complex64_t -> integer types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
// complex64_t -> float8 variants, real floats, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t,
    ::tensorstore::dtypes::float8_e4m3fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float8_e5m2_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t,
    ::tensorstore::dtypes::float8_e5m2fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::bfloat16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// complex128_t -> integer types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
// complex128_t -> float8 variants, real floats, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t,
    ::tensorstore::dtypes::float8_e4m3fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float8_e5m2_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t,
    ::tensorstore::dtypes::float8_e5m2fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::bfloat16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// json_t -> bool, integers, float8 variants, real floats, and string types.
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::bool_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int4_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint8_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e5m2_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::bfloat16_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float32_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float64_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::string_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::json_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
// string_t -> ustring_t / json_t (kSupported only; presumably value-checked
// for UTF-8 / JSON validity -- see conversion kernels elsewhere in the file).
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::string_t, ::tensorstore::dtypes::ustring_t,
    DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
    ::tensorstore::dtypes::string_t, ::tensorstore::dtypes::json_t,
    DataTypeConversionFlags::kSupported)
// ===========================================================================
// Integer <-> integer conversion pairs.  Unlike the kSupported-only table
// above, each pair here uses TENSORSTORE_INTERNAL_INHERITED_CONVERT to pull
// its flags from internal_data_type::IntegerIntegerDataTypeConversionTraits,
// which presumably computes the flags (e.g. safe-and-implicit widening vs.
// merely supported narrowing) from the two integer types -- the traits
// helper and macro are defined earlier in this file, outside this view.
// The listing enumerates the full Cartesian product of
// {int4, int8, uint8, int16, uint16, int32, uint32, int64, uint64},
// including the identity pairs (e.g. int4 -> int4).
// ===========================================================================
// int4_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// int8_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// uint8_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// int16_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// uint16_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// int32_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// uint32_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// int64_t -> all integer types.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint64_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
// uint64_t -> integer types (remainder of this row continues past this
// section).
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int4_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint8_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint16_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int32_t,
    internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
// Integer -> floating-point conversion registrations: each integer source
// (int4_t through uint64_t) is paired with every float target, including the
// five float8 variants (e4m3fn, e4m3fnuz, e4m3b11fnuz, e5m2, e5m2fnuz),
// float16_t, bfloat16_t, float32_t, and float64_t.
// IntegerFloatDataTypeConversionTraits (defined earlier in this file)
// presumably classifies these conversions' safety/implicitness -- confirm
// against the traits definition.  Table appears machine-generated.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::IntegerFloatDataTypeConversionTraits)
// Integer -> complex conversion registrations: each integer source type is
// paired with complex64_t and complex128_t.  The traits name
// (NumericComplexDataTypeConversionTraits) suggests the same traits also
// cover float->complex pairs elsewhere in this generated table -- confirm
// against the traits definition, which is outside this view.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
// Integer -> JSON conversion registrations: every integer source type is
// paired with json_t via IntegerJsonDataTypeConversionTraits (defined
// earlier in this file, outside this view).
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::json_t,
    internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
// bfloat16_t -> each floating-point type (including itself); all share
// the float->float conversion traits.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
// float32_t -> each floating-point type.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
// float64_t -> each floating-point type.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t,
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e5m2_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::bfloat16_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float32_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float64_t,
    internal_data_type::FloatFloatDataTypeConversionTraits)
// Each floating-point type -> complex64_t / complex128_t, using the
// numeric->complex conversion traits.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fnuz_t,
    ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fnuz_t,
    ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
    ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2fnuz_t,
    ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2fnuz_t,
    ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::NumericComplexDataTypeConversionTraits)
// Each floating-point type -> json_t.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::json_t,
    internal_data_type::FloatJsonDataTypeConversionTraits)
// complex <-> complex conversions.
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::complex64_t,
    internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
    ::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::complex128_t,
    internal_data_type::ComplexComplexDataTypeConversionTraits)
#undef TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS
#undef TENSORSTORE_INTERNAL_INHERITED_CONVERT
}
#endif | #include "tensorstore/data_type_conversion.h"
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/half_gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DataTypeConversionFlags;
using ::tensorstore::DataTypeConversionTraits;
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::IsDataTypeConversionSupported;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StrCat;
using ::tensorstore::internal::GetDataTypeConverter;
using ::tensorstore::internal::GetDataTypeConverterOrError;
using ::tensorstore::internal::GetElementCopyErrorStatus;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
#define X(T, ...) \
using ::tensorstore::dtypes::T; \
TENSORSTORE_FOR_EACH_DATA_TYPE(X)
#undef X
// Shorthands for the individual DataTypeConversionFlags bits asserted
// throughout these tests.
// Set when any conversion between the two types exists.
constexpr DataTypeConversionFlags kSupported =
    DataTypeConversionFlags::kSupported;
// Set when the conversion is between identical types.
constexpr DataTypeConversionFlags kIdentity =
    DataTypeConversionFlags::kIdentity;
// Set when the conversion is lossless and performed implicitly.
constexpr DataTypeConversionFlags kSafeAndImplicit =
    DataTypeConversionFlags::kSafeAndImplicit;
// Set when the representations are bit-compatible (no copy needed).
constexpr DataTypeConversionFlags kCanReinterpretCast =
    DataTypeConversionFlags::kCanReinterpretCast;
template <typename From, typename To>
void TestUnsupported() {
static_assert(DataTypeConversionTraits<From, To>::flags ==
DataTypeConversionFlags{});
static_assert(!IsDataTypeConversionSupported<From, To>);
auto r = GetDataTypeConverter(dtype_v<From>, dtype_v<To>);
EXPECT_EQ(DataTypeConversionFlags{}, r.flags);
}
// Converts a single `From` value to `To` via the registered data type
// converter and returns the converted value (or the conversion error).
//
// `flags` lists the conversion flags expected *in addition to*
// kSupported, which is implied.  The helper asserts that the
// compile-time traits (for distinct types), the compile-time support
// predicates, and the runtime converter all report exactly `flags`.
template <typename To, typename From>
Result<To> TestConversion(
    From from, DataTypeConversionFlags flags = DataTypeConversionFlags{}) {
  SCOPED_TRACE(
      StrCat("TestConversion<To=", dtype_v<To>, ", From=", dtype_v<From>, ">")
          .c_str());
  // Every conversion exercised here is at least supported.
  flags = flags | kSupported;
  if constexpr (!std::is_same_v<To, From>) {
    // Trait flags are only compared for distinct source/target types.
    EXPECT_EQ(flags, (DataTypeConversionTraits<From, To>::flags));
  }
  EXPECT_EQ(!!(flags & kSafeAndImplicit),
            (IsDataTypeConversionSupported<From, To, kSafeAndImplicit>));
  EXPECT_TRUE((IsDataTypeConversionSupported<From, To>));
  auto r = GetDataTypeConverter(dtype_v<From>, dtype_v<To>);
  EXPECT_EQ(flags, r.flags);
  To value;
  absl::Status status;
  // Invoke the elementwise conversion function on one contiguous
  // element; a return value other than 1 indicates the element failed
  // to convert, in which case `status` carries the error.
  if ((*r.closure.function)[IterationBufferKind::kContiguous](
          r.closure.context, {1, 1},
          IterationBufferPointer(&from, Index(0), Index(0)),
          IterationBufferPointer(&value, Index(0), Index(0)), &status) != 1) {
    return GetElementCopyErrorStatus(std::move(status));
  }
  return value;
}
// Conversions out of bool: numeric targets receive 0/1, json receives
// true/false, and string targets are unsupported.
TEST(DataTypeConversionTest, Bool) {
  EXPECT_EQ(false, TestConversion<bool_t>(false, kSafeAndImplicit | kIdentity |
                                                     kCanReinterpretCast));
  EXPECT_EQ(true, TestConversion<bool_t>(true, kSafeAndImplicit | kIdentity |
                                                   kCanReinterpretCast));
  EXPECT_EQ(0, TestConversion<int4_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<int4_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<int8_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<int8_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<int16_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<int16_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<int32_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<int32_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<int64_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<int64_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0u, TestConversion<uint8_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1u, TestConversion<uint8_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0u, TestConversion<uint16_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1u, TestConversion<uint16_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0u, TestConversion<uint32_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1u, TestConversion<uint32_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0u, TestConversion<uint64_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1u, TestConversion<uint64_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<float16_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<float16_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<bfloat16_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<bfloat16_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<float32_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<float32_t>(true, kSafeAndImplicit));
  EXPECT_EQ(0, TestConversion<float64_t>(false, kSafeAndImplicit));
  EXPECT_EQ(1, TestConversion<float64_t>(true, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(0),
            TestConversion<complex64_t>(false, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(1),
            TestConversion<complex64_t>(true, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(0),
            TestConversion<complex128_t>(false, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(1),
            TestConversion<complex128_t>(true, kSafeAndImplicit));
  EXPECT_EQ(json_t(false), TestConversion<json_t>(false, kSafeAndImplicit));
  EXPECT_EQ(json_t(true), TestConversion<json_t>(true, kSafeAndImplicit));
  // bool -> string conversions are intentionally not provided.
  TestUnsupported<bool, string_t>();
  TestUnsupported<bool, ustring_t>();
}
// Conversions out of int4_t, probed with its extreme representable
// values (7 and -8).
TEST(DataTypeConversionTest, Int4) {
  using T = int4_t;
  constexpr T kPositive{7};
  constexpr T kNegative{-8};
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(kPositive));
  EXPECT_EQ(true, TestConversion<bool_t>(kNegative));
  // Widening to larger signed integers is safe and implicit.
  EXPECT_EQ(static_cast<int8_t>(kNegative),
            TestConversion<int8_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(static_cast<int8_t>(kPositive),
            TestConversion<int8_t>(kPositive, kSafeAndImplicit));
  EXPECT_EQ(static_cast<int16_t>(kNegative),
            TestConversion<int16_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(static_cast<int32_t>(kNegative),
            TestConversion<int32_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(static_cast<int64_t>(kNegative),
            TestConversion<int64_t>(kNegative, kSafeAndImplicit));
  // Conversions to unsigned integers are supported but not safe.
  EXPECT_EQ(static_cast<uint8_t>(kNegative), TestConversion<uint8_t>(kNegative));
  EXPECT_EQ(static_cast<uint8_t>(kPositive), TestConversion<uint8_t>(kPositive));
  EXPECT_EQ(static_cast<uint16_t>(kNegative),
            TestConversion<uint16_t>(kNegative));
  EXPECT_EQ(static_cast<uint16_t>(kPositive),
            TestConversion<uint16_t>(kPositive));
  EXPECT_EQ(static_cast<uint32_t>(kNegative),
            TestConversion<uint32_t>(kNegative));
  EXPECT_EQ(static_cast<uint32_t>(kPositive),
            TestConversion<uint32_t>(kPositive));
  EXPECT_EQ(static_cast<uint64_t>(kNegative),
            TestConversion<uint64_t>(kNegative));
  EXPECT_EQ(static_cast<uint64_t>(kPositive),
            TestConversion<uint64_t>(kPositive));
  // Every floating-point and complex type can represent all int4 values.
  EXPECT_EQ(float16_t(kNegative),
            TestConversion<float16_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(bfloat16_t(kNegative),
            TestConversion<bfloat16_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(float32_t(kNegative),
            TestConversion<float32_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(float64_t(kNegative),
            TestConversion<float64_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(kNegative)),
            TestConversion<complex64_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(kNegative)),
            TestConversion<complex128_t>(kNegative, kSafeAndImplicit));
  EXPECT_EQ("-8", TestConversion<string_t>(kNegative));
  EXPECT_EQ(ustring_t{"-8"}, TestConversion<ustring_t>(kNegative));
  EXPECT_EQ(json_t(kNegative),
            TestConversion<json_t>(kNegative, kSafeAndImplicit));
}
// Conversions out of int8_t with representative positive/negative
// values.
TEST(DataTypeConversionTest, Int8) {
  using T = int8_t;
  constexpr T pos = 42;
  constexpr T neg = -42;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg),
            TestConversion<int8_t>(
                neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(int8_t(pos),
            TestConversion<int8_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  // Same-width unsigned target: representations are bit-compatible.
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos, kCanReinterpretCast));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)),
            TestConversion<complex64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("-42", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"-42"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of uint8_t.  Note `neg = -42` wraps to 214 as a
// uint8_t, as the string conversion below confirms.
TEST(DataTypeConversionTest, Uint8) {
  using T = uint8_t;
  constexpr T pos = 42;
  constexpr T neg = -42;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  // Same-width signed target: representations are bit-compatible.
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos, kCanReinterpretCast));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint8_t(neg),
            TestConversion<uint8_t>(
                neg, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
  EXPECT_EQ(uint8_t(pos),
            TestConversion<uint8_t>(
                pos, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)),
            TestConversion<complex64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("214", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"214"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of int16_t with representative positive/negative
// values.
TEST(DataTypeConversionTest, Int16) {
  using T = int16_t;
  constexpr T pos = 12345;
  constexpr T neg = -12345;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(neg),
            TestConversion<int16_t>(
                neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos, kCanReinterpretCast));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  // 16-bit floats cannot represent all int16 values exactly, so these
  // conversions are supported but not safe-and-implicit.
  EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg));
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)),
            TestConversion<complex64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("-12345", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"-12345"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of uint16_t.  Note `neg = -12345` wraps to 53191 as
// a uint16_t, as the string conversion below confirms.
//
// Fix: the bool_t(pos)/bool_t(neg) assertions were previously
// duplicated verbatim; each is now asserted exactly once.
TEST(DataTypeConversionTest, Uint16) {
  using T = uint16_t;
  constexpr T pos = 12345;
  constexpr T neg = -12345;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  // Same-width signed target: representations are bit-compatible.
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg),
            TestConversion<uint16_t>(
                neg, kCanReinterpretCast | kIdentity | kSafeAndImplicit));
  EXPECT_EQ(uint16_t(pos),
            TestConversion<uint16_t>(
                pos, kCanReinterpretCast | kIdentity | kSafeAndImplicit));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
  // 16-bit floats cannot represent all uint16 values exactly, so these
  // conversions are supported but not safe-and-implicit.
  EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg));
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)),
            TestConversion<complex64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("53191", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"53191"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of int32_t with representative positive/negative
// values.
TEST(DataTypeConversionTest, Int32) {
  using T = int32_t;
  constexpr T pos = 123456789;
  constexpr T neg = -123456789;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
  EXPECT_EQ(int32_t(neg),
            TestConversion<int32_t>(
                neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kCanReinterpretCast));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(float16_t(static_cast<float>(neg)),
            TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
            TestConversion<bfloat16_t>(neg));
  // float32 cannot represent all int32 values exactly, so this
  // conversion is supported but not safe-and-implicit; float64 can.
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("-123456789", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"-123456789"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of uint32_t.  Note `neg = -123456789` wraps to
// 4171510507 as a uint32_t, as the string conversion below confirms.
TEST(DataTypeConversionTest, Uint32) {
  using T = uint32_t;
  constexpr T pos = 123456789;
  constexpr T neg = -123456789;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(neg),
            TestConversion<uint32_t>(
                neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(uint32_t(pos),
            TestConversion<uint32_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(float16_t(static_cast<float>(neg)),
            TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
            TestConversion<bfloat16_t>(neg));
  // float32 cannot represent all uint32 values exactly; float64 can.
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
  EXPECT_EQ(complex128_t(float64_t(neg)),
            TestConversion<complex128_t>(neg, kSafeAndImplicit));
  EXPECT_EQ("4171510507", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"4171510507"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of int64_t with representative positive/negative
// values.
TEST(DataTypeConversionTest, Int64) {
  using T = int64_t;
  constexpr T pos = 123456789012345;
  constexpr T neg = -123456789012345;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg));
  EXPECT_EQ(int64_t(neg),
            TestConversion<int64_t>(
                neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kCanReinterpretCast));
  EXPECT_EQ(float16_t(static_cast<float>(neg)),
            TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
            TestConversion<bfloat16_t>(neg));
  // No floating-point type can represent all int64 values exactly, so
  // none of these conversions is safe-and-implicit.
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg));
  EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
  EXPECT_EQ(complex128_t(float64_t(neg)), TestConversion<complex128_t>(neg));
  EXPECT_EQ("-123456789012345", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"-123456789012345"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of uint64_t.  Note `neg = -123456789012345` wraps to
// 18446620616920539271 as a uint64_t, as the string conversion below
// confirms.
TEST(DataTypeConversionTest, Uint64) {
  using T = uint64_t;
  constexpr T pos = 123456789012345;
  constexpr T neg = -123456789012345;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(true, TestConversion<bool_t>(neg));
  EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
  EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg));
  EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kCanReinterpretCast));
  EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(neg),
            TestConversion<uint64_t>(
                neg, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
  EXPECT_EQ(uint64_t(pos),
            TestConversion<uint64_t>(
                pos, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
  EXPECT_EQ(float16_t(static_cast<float>(neg)),
            TestConversion<float16_t>(neg));
  EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
            TestConversion<bfloat16_t>(neg));
  // No floating-point type can represent all uint64 values exactly, so
  // none of these conversions is safe-and-implicit.
  EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
  EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg));
  EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
  EXPECT_EQ(complex128_t(float64_t(neg)), TestConversion<complex128_t>(neg));
  EXPECT_EQ("18446620616920539271", TestConversion<string_t>(neg));
  EXPECT_EQ(ustring_t{"18446620616920539271"}, TestConversion<ustring_t>(neg));
  EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
// Conversions out of float16_t, probed with an exactly representable
// value (42.5).
TEST(DataTypeConversionTest, Float16) {
  using T = float16_t;
  const T pos(42.5);
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  // Conversions to integer types are supported but lossy.
  EXPECT_EQ(static_cast<int4_t>(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(static_cast<int8_t>(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(static_cast<int16_t>(pos), TestConversion<int16_t>(pos));
  EXPECT_EQ(static_cast<int32_t>(pos), TestConversion<int32_t>(pos));
  EXPECT_EQ(static_cast<int64_t>(pos), TestConversion<int64_t>(pos));
  EXPECT_EQ(static_cast<uint8_t>(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(static_cast<uint16_t>(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(static_cast<uint32_t>(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(static_cast<uint64_t>(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(static_cast<float16_t>(pos),
            TestConversion<float16_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(static_cast<bfloat16_t>(pos), TestConversion<bfloat16_t>(pos));
  // Wider floats and complex types can hold any float16 exactly.
  EXPECT_EQ(static_cast<float32_t>(pos),
            TestConversion<float32_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(static_cast<float64_t>(pos),
            TestConversion<float64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(pos)),
            TestConversion<complex64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(pos)),
            TestConversion<complex128_t>(pos, kSafeAndImplicit));
  EXPECT_EQ("42.5", TestConversion<string_t>(pos));
  EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
  EXPECT_EQ(json_t(42.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
// Typed-test fixture instantiated once for each supported 8-bit float
// variant, so that the same conversion matrix is checked for all of them.
template <typename InternalFloat>
class InternalFloat8Test : public ::testing::Test {};
using InternalFloat8Types =
    ::testing::Types<float8_e4m3fn_t, float8_e4m3fnuz_t, float8_e4m3b11fnuz_t,
                     float8_e5m2_t, float8_e5m2fnuz_t>;
TYPED_TEST_SUITE(InternalFloat8Test, InternalFloat8Types);
// Verifies conversions from each float8 variant to every other data type.
// Conversions between the float8 variants themselves are explicit, except
// e5m2 -> e5m2fnuz which is safe+implicit; widening to float16 is
// safe+implicit except from e5m2fnuz (whose NaN encoding is not
// representable implicitly).
TYPED_TEST(InternalFloat8Test, DataTypeConversionTest_InternalFloat8Types) {
  using T = TypeParam;
  const T pos(3.5);
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
  EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
  EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  // Same-type conversion: identity and bit-compatible.
  EXPECT_EQ(T(pos), TestConversion<T>(pos, kSafeAndImplicit | kIdentity |
                                               kCanReinterpretCast));
  if (!std::is_same_v<T, float8_e4m3fn_t>) {
    EXPECT_EQ(float8_e4m3fn_t(pos), TestConversion<float8_e4m3fn_t>(pos));
  }
  if (!std::is_same_v<T, float8_e4m3fnuz_t>) {
    EXPECT_EQ(float8_e4m3fnuz_t(pos), TestConversion<float8_e4m3fnuz_t>(pos));
  }
  if (!std::is_same_v<T, float8_e4m3b11fnuz_t>) {
    EXPECT_EQ(float8_e4m3b11fnuz_t(pos),
              TestConversion<float8_e4m3b11fnuz_t>(pos));
  }
  if (!std::is_same_v<T, float8_e5m2fnuz_t>) {
    if (std::is_same_v<T, float8_e5m2_t>) {
      // e5m2 -> e5m2fnuz is the one lossless inter-float8 conversion.
      EXPECT_EQ(float8_e5m2fnuz_t(pos),
                TestConversion<float8_e5m2fnuz_t>(pos, kSafeAndImplicit));
    } else {
      EXPECT_EQ(float8_e5m2fnuz_t(pos), TestConversion<float8_e5m2fnuz_t>(pos));
    }
  }
  if (!std::is_same_v<T, float8_e5m2_t>) {
    EXPECT_EQ(float8_e5m2_t(pos), TestConversion<float8_e5m2_t>(pos));
  }
  if (std::is_same_v<T, float8_e5m2fnuz_t>) {
    EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
  } else {
    EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos, kSafeAndImplicit));
  }
  EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(pos)),
            TestConversion<complex64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(pos)),
            TestConversion<complex128_t>(pos, kSafeAndImplicit));
  EXPECT_EQ("3.5", TestConversion<string_t>(pos));
  EXPECT_EQ(ustring_t{"3.5"}, TestConversion<ustring_t>(pos));
  EXPECT_EQ(json_t(3.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
// Verifies conversions from bfloat16_t to every other supported data type.
// Identity conversion is additionally kIdentity|kCanReinterpretCast; widening
// to float32/float64/complex is safe+implicit; all others are explicit.
TEST(DataTypeConversionTest, Bfloat16) {
  using T = bfloat16_t;
  const T pos(42.5);
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
  EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
  EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
  EXPECT_EQ(bfloat16_t(pos),
            TestConversion<bfloat16_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(pos)),
            TestConversion<complex64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(pos)),
            TestConversion<complex128_t>(pos, kSafeAndImplicit));
  EXPECT_EQ("42.5", TestConversion<string_t>(pos));
  EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
  EXPECT_EQ(json_t(42.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
// Verifies conversions from float32_t to every other supported data type.
// Identity conversion is additionally kIdentity|kCanReinterpretCast; widening
// to float64/complex is safe+implicit; all others are explicit.
//
// Fix: the uint64_t assertion was accidentally duplicated (copy-paste); the
// redundant line has been removed to match the other float tests.
TEST(DataTypeConversionTest, Float32) {
  using T = float32_t;
  constexpr T pos = 42.5;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
  EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
  EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
  EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos));
  EXPECT_EQ(float32_t(pos),
            TestConversion<float32_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex64_t(float32_t(pos)),
            TestConversion<complex64_t>(pos, kSafeAndImplicit));
  EXPECT_EQ(complex128_t(float64_t(pos)),
            TestConversion<complex128_t>(pos, kSafeAndImplicit));
  EXPECT_EQ("42.5", TestConversion<string_t>(pos));
  EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
  EXPECT_EQ(json_t(pos), TestConversion<json_t>(pos, kSafeAndImplicit));
}
// Verifies conversions from float64_t to every other supported data type.
// Identity conversion is additionally kIdentity|kCanReinterpretCast; only the
// complex128 conversion (exact widening) is safe+implicit.
TEST(DataTypeConversionTest, Float64) {
  using T = float64_t;
  constexpr T pos = 42.5;
  EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
  EXPECT_EQ(true, TestConversion<bool_t>(pos));
  EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
  EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
  EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
  EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
  EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
  EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
  EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
  EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
  EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
  EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
  EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos));
  EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos));
  EXPECT_EQ(float64_t(pos),
            TestConversion<float64_t>(
                pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(complex64_t(float32_t(pos)), TestConversion<complex64_t>(pos));
  EXPECT_EQ(complex128_t(float64_t(pos)),
            TestConversion<complex128_t>(pos, kSafeAndImplicit));
  EXPECT_EQ("42.5", TestConversion<string_t>(pos));
  EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
  EXPECT_EQ(json_t(pos), TestConversion<json_t>(pos, kSafeAndImplicit));
}
// Verifies conversions from complex64_t: conversions to real/integer types
// take the real part; identity and complex128 widening are safe+implicit;
// conversion to bool is unsupported.
//
// Fix: the uint8/uint16/uint32/uint64 assertions were each accidentally
// duplicated (copy-paste); the redundant lines have been removed so the test
// mirrors the Complex128 test below.
TEST(DataTypeConversionTest, Complex64) {
  using T = complex64_t;
  constexpr T value(42.5, 43.5);
  EXPECT_EQ(int4_t(value.real()), TestConversion<int4_t>(value));
  EXPECT_EQ(int8_t(value.real()), TestConversion<int8_t>(value));
  EXPECT_EQ(int16_t(value.real()), TestConversion<int16_t>(value));
  EXPECT_EQ(int32_t(value.real()), TestConversion<int32_t>(value));
  EXPECT_EQ(int64_t(value.real()), TestConversion<int64_t>(value));
  EXPECT_EQ(uint8_t(value.real()), TestConversion<uint8_t>(value));
  EXPECT_EQ(uint16_t(value.real()), TestConversion<uint16_t>(value));
  EXPECT_EQ(uint32_t(value.real()), TestConversion<uint32_t>(value));
  EXPECT_EQ(uint64_t(value.real()), TestConversion<uint64_t>(value));
  EXPECT_EQ(float16_t(value.real()), TestConversion<float16_t>(value));
  EXPECT_EQ(bfloat16_t(value.real()), TestConversion<bfloat16_t>(value));
  EXPECT_EQ(float32_t(value.real()), TestConversion<float32_t>(value));
  EXPECT_EQ(float64_t(value.real()), TestConversion<float64_t>(value));
  EXPECT_EQ(complex64_t(value),
            TestConversion<complex64_t>(
                value, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ(complex128_t(value),
            TestConversion<complex128_t>(value, kSafeAndImplicit));
  EXPECT_EQ("(42.5,43.5)", TestConversion<string_t>(value));
  EXPECT_EQ(ustring_t{"(42.5,43.5)"}, TestConversion<ustring_t>(value));
  // JSON representation is a two-element [real, imag] array.
  EXPECT_EQ(json_t(json_t::array_t{value.real(), value.imag()}),
            TestConversion<json_t>(value, kSafeAndImplicit));
  TestUnsupported<T, bool>();
}
// Verifies conversions from complex128_t: conversions to real/integer types
// take the real part; only the identity conversion is safe+implicit;
// conversion to bool is unsupported.
TEST(DataTypeConversionTest, Complex128) {
  using T = complex128_t;
  constexpr T value(42.5, 43.5);
  EXPECT_EQ(int4_t(value.real()), TestConversion<int4_t>(value));
  EXPECT_EQ(int8_t(value.real()), TestConversion<int8_t>(value));
  EXPECT_EQ(int16_t(value.real()), TestConversion<int16_t>(value));
  EXPECT_EQ(int32_t(value.real()), TestConversion<int32_t>(value));
  EXPECT_EQ(int64_t(value.real()), TestConversion<int64_t>(value));
  EXPECT_EQ(uint8_t(value.real()), TestConversion<uint8_t>(value));
  EXPECT_EQ(uint16_t(value.real()), TestConversion<uint16_t>(value));
  EXPECT_EQ(uint32_t(value.real()), TestConversion<uint32_t>(value));
  EXPECT_EQ(uint64_t(value.real()), TestConversion<uint64_t>(value));
  EXPECT_EQ(float16_t(value.real()), TestConversion<float16_t>(value));
  EXPECT_EQ(bfloat16_t(value.real()), TestConversion<bfloat16_t>(value));
  EXPECT_EQ(float32_t(value.real()), TestConversion<float32_t>(value));
  EXPECT_EQ(float64_t(value.real()), TestConversion<float64_t>(value));
  EXPECT_EQ(complex64_t(value), TestConversion<complex64_t>(value));
  EXPECT_EQ(complex128_t(value),
            TestConversion<complex128_t>(
                value, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
  EXPECT_EQ("(42.5,43.5)", TestConversion<string_t>(value));
  EXPECT_EQ(ustring_t{"(42.5,43.5)"}, TestConversion<ustring_t>(value));
  // JSON representation is a two-element [real, imag] array.
  EXPECT_EQ(json_t(json_t::array_t{value.real(), value.imag()}),
            TestConversion<json_t>(value, kSafeAndImplicit));
  TestUnsupported<T, bool>();
}
// Verifies conversions from string_t: no numeric conversions are supported;
// conversion to ustring_t/json_t validates UTF-8 and fails on invalid input.
TEST(DataTypeConversionTest, String) {
  using T = string_t;
  T value = "test";
  T invalid_utf8 = "test\xa0";
  TestUnsupported<T, bool>();
  TestUnsupported<T, int4_t>();
  TestUnsupported<T, int8_t>();
  TestUnsupported<T, uint8_t>();
  TestUnsupported<T, int16_t>();
  TestUnsupported<T, uint16_t>();
  TestUnsupported<T, int32_t>();
  TestUnsupported<T, uint32_t>();
  TestUnsupported<T, int64_t>();
  TestUnsupported<T, uint64_t>();
  TestUnsupported<T, float16_t>();
  TestUnsupported<T, bfloat16_t>();
  TestUnsupported<T, float32_t>();
  TestUnsupported<T, float64_t>();
  TestUnsupported<T, complex64_t>();
  TestUnsupported<T, complex128_t>();
  EXPECT_EQ(value,
            TestConversion<string_t>(
                value, kSafeAndImplicit | kCanReinterpretCast | kIdentity));
  EXPECT_EQ(ustring_t{value}, TestConversion<ustring_t>(value));
  EXPECT_THAT(TestConversion<ustring_t>(invalid_utf8),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Invalid UTF-8 sequence encountered"));
  EXPECT_EQ(json_t("test"), TestConversion<json_t>(value));
  EXPECT_THAT(TestConversion<json_t>(invalid_utf8),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Invalid UTF-8 sequence encountered"));
}
// Verifies conversions from ustring_t: no numeric conversions; conversion to
// string_t (drop the UTF-8 guarantee) and json_t are safe+implicit.
TEST(DataTypeConversionTest, Ustring) {
  using T = ustring_t;
  T value{"test"};
  TestUnsupported<T, bool>();
  TestUnsupported<T, int4_t>();
  TestUnsupported<T, int8_t>();
  TestUnsupported<T, uint8_t>();
  TestUnsupported<T, int16_t>();
  TestUnsupported<T, uint16_t>();
  TestUnsupported<T, int32_t>();
  TestUnsupported<T, uint32_t>();
  TestUnsupported<T, int64_t>();
  TestUnsupported<T, uint64_t>();
  TestUnsupported<T, float16_t>();
  TestUnsupported<T, bfloat16_t>();
  TestUnsupported<T, float32_t>();
  TestUnsupported<T, float64_t>();
  TestUnsupported<T, complex64_t>();
  TestUnsupported<T, complex128_t>();
  EXPECT_EQ(value.utf8, TestConversion<string_t>(
                            value, kSafeAndImplicit | kCanReinterpretCast));
  EXPECT_EQ(value,
            TestConversion<ustring_t>(
                value, kSafeAndImplicit | kCanReinterpretCast | kIdentity));
  EXPECT_EQ(json_t("test"), TestConversion<json_t>(value, kSafeAndImplicit));
}
// Verifies conversions from json_t: null and mismatched JSON kinds fail with
// InvalidArgument; numeric/boolean values convert (including from their
// string representations); complex conversions are unsupported.
//
// Fix: the `string_t` null-json assertion appeared twice (the second copy
// after the ustring_t check was a duplicate); the redundant pair of lines has
// been removed.
TEST(DataTypeConversionTest, Json) {
  EXPECT_THAT(TestConversion<bool_t>(json_t("hello")),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<bool_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<int4_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<int8_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<int16_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<int32_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<int64_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<uint8_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<uint16_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<uint32_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<uint64_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<float16_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<bfloat16_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<float32_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<float64_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<string_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<ustring_t>(json_t(nullptr)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  // Boolean and numeric conversions also accept string-encoded values.
  EXPECT_EQ(false, TestConversion<bool_t>(json_t(false)));
  EXPECT_EQ(false, TestConversion<bool_t>(json_t("false")));
  EXPECT_EQ(true, TestConversion<bool_t>(json_t(true)));
  EXPECT_EQ(true, TestConversion<bool_t>(json_t("true")));
  EXPECT_EQ(int4_t(-8), TestConversion<int4_t>(json_t(-8)));
  EXPECT_EQ(int8_t(58), TestConversion<int8_t>(json_t(58)));
  EXPECT_EQ(int16_t(1234), TestConversion<int16_t>(json_t(1234)));
  EXPECT_EQ(int16_t(1234), TestConversion<int16_t>(json_t("1234")));
  EXPECT_EQ(int32_t(123456789), TestConversion<int32_t>(json_t(123456789)));
  EXPECT_EQ(int64_t(1234567890123),
            TestConversion<int64_t>(json_t(1234567890123)));
  EXPECT_EQ(uint8_t(254), TestConversion<uint8_t>(json_t(254u)));
  EXPECT_EQ(uint16_t(45123), TestConversion<uint16_t>(json_t(45123u)));
  EXPECT_EQ(uint32_t(4012356789),
            TestConversion<uint32_t>(json_t(4012356789u)));
  EXPECT_EQ(uint64_t(40123567891234),
            TestConversion<uint64_t>(json_t(40123567891234)));
  EXPECT_EQ(float16_t(42.5), TestConversion<float16_t>(json_t(42.5)));
  EXPECT_EQ(float16_t(42.5), TestConversion<float16_t>(json_t("42.5")));
  EXPECT_EQ(bfloat16_t(42.5), TestConversion<bfloat16_t>(json_t(42.5)));
  EXPECT_EQ(bfloat16_t(42.5), TestConversion<bfloat16_t>(json_t("42.5")));
  EXPECT_EQ(float32_t(42.5), TestConversion<float32_t>(json_t(42.5)));
  EXPECT_EQ(float64_t(42.5), TestConversion<float64_t>(json_t(42.5)));
  EXPECT_EQ(float64_t(42.5), TestConversion<float64_t>(json_t("42.5")));
  TestUnsupported<json_t, complex64_t>();
  TestUnsupported<json_t, complex128_t>();
  // Only JSON strings convert to string_t; other kinds fail.
  EXPECT_EQ("hello", TestConversion<string_t>(json_t("hello")));
  EXPECT_THAT(TestConversion<string_t>(json_t(7)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<string_t>(json_t(true)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<string_t>(json_t(1.5)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(TestConversion<string_t>(json_t::array({2, 3})),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_EQ(ustring_t{"hello"}, TestConversion<ustring_t>(json_t("hello")));
  EXPECT_EQ(json_t("hello"), TestConversion<json_t>(
                                 json_t("hello"), kSafeAndImplicit | kIdentity |
                                                      kCanReinterpretCast));
}
// Verifies GetDataTypeConverterOrError: returns OK when the conversion exists
// with the required flags, and an InvalidArgument status (with a descriptive
// message) when the conversion is unsupported or lacks a required flag.
TEST(GetDataTypeConverterOrErrorTest, Basic) {
  TENSORSTORE_EXPECT_OK(
      GetDataTypeConverterOrError(dtype_v<int32_t>, dtype_v<int32_t>));
  TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
      dtype_v<int32_t>, dtype_v<int32_t>, kIdentity));
  TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
      dtype_v<int32_t>, dtype_v<int64_t>, kSafeAndImplicit));
  TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
      dtype_v<int32_t>, dtype_v<uint32_t>, kCanReinterpretCast));
  EXPECT_THAT(
      GetDataTypeConverterOrError(dtype_v<json_t>, dtype_v<complex64_t>),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Cannot convert json -> complex64"));
  EXPECT_THAT(
      GetDataTypeConverterOrError(dtype_v<uint32_t>, dtype_v<int32_t>,
                                  kSafeAndImplicit),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          "Explicit data type conversion required to convert uint32 -> int32"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/data_type_conversion.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/data_type_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bd4ce84d-8c2b-4f27-a3b4-90b861b3d323 | cpp | google/tensorstore | constant_bit_vector | tensorstore/util/constant_bit_vector.h | tensorstore/util/constant_bit_vector_test.cc | #ifndef TENSORSTORE_UTIL_CONSTANT_BIT_VECTOR_H_
#define TENSORSTORE_UTIL_CONSTANT_BIT_VECTOR_H_
#include <cstddef>
#include <type_traits>
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/constant_vector.h"
namespace tensorstore {
/// Returns a bit vector of static length `Length` in which every bit equals
/// `value`.  Backed by the shared constant-vector storage returned by
/// `GetConstantVector`, so no allocation is performed; all-ones or all-zeros
/// blocks are chosen via the ternary on `value`.
template <typename Block, bool value, std::ptrdiff_t Length>
constexpr BitSpan<const Block, Length> GetConstantBitVector(
    std::integral_constant<std::ptrdiff_t, Length> = {}) {
  return {GetConstantVector<
              Block, (value ? ~static_cast<Block>(0) : static_cast<Block>(0)),
              BitVectorSizeInBlocks<Block>(Length)>()
              .data(),
          0, Length};
}
/// Returns a bit vector of dynamic length `length` in which every bit equals
/// `value`.  Like the static overload, the underlying block storage is a
/// shared constant vector sized to `BitVectorSizeInBlocks<Block>(length)`.
template <typename Block, bool value>
BitSpan<const Block> GetConstantBitVector(std::ptrdiff_t length) {
  return {GetConstantVector<Block, (value ? ~static_cast<Block>(0)
                                          : static_cast<Block>(0))>(
              BitVectorSizeInBlocks<Block>(length))
              .data(),
          0, length};
}
}
#endif | #include "tensorstore/util/constant_bit_vector.h"
#include <cstdint>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/bit_span.h"
namespace {
using ::tensorstore::BitSpan;
using ::tensorstore::GetConstantBitVector;
// Each test checks both the deduced BitSpan type (static vs. dynamic extent,
// const Block) and that all 113 bits have the requested constant value.
TEST(GetConstantBitVectorTest, StaticExtentFalse) {
  constexpr auto v = GetConstantBitVector<uint64_t, false, 113>();
  static_assert(
      std::is_same_v<decltype(v), const BitSpan<const uint64_t, 113>>);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, false)));
}
TEST(GetConstantBitVectorTest, StaticExtentTrue) {
  constexpr auto v = GetConstantBitVector<uint64_t, true, 113>();
  static_assert(
      std::is_same_v<decltype(v), const BitSpan<const uint64_t, 113>>);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, true)));
}
TEST(GetConstantBitVectorTest, DynamicExtentFalse) {
  auto v = GetConstantBitVector<uint64_t, false>(113);
  static_assert(std::is_same_v<decltype(v), BitSpan<const uint64_t>>);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, false)));
}
TEST(GetConstantBitVectorTest, DynamicExtentTrue) {
  auto v = GetConstantBitVector<uint64_t, true>(113);
  static_assert(std::is_same_v<decltype(v), BitSpan<const uint64_t>>);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, true)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_bit_vector.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_bit_vector_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5aa739c9-6249-4747-abc4-3b74415156d6 | cpp | google/tensorstore | bit_vec | tensorstore/util/bit_vec.h | tensorstore/util/bit_vec_test.cc | #ifndef TENSORSTORE_UTIL_BIT_VEC_H_
#define TENSORSTORE_UTIL_BIT_VEC_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/bit_vec_impl.h"
#include "tensorstore/util/small_bit_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
/// Bit vector with compile-time (`Extent`) or run-time (`dynamic_extent`)
/// length.  Bits are packed into blocks of `internal_bitvec::Block` and
/// accessed through `BitRef`/`BitIterator` proxies; small vectors use inline
/// storage (see `internal_bitvec::BitVecStorage`).
///
/// Fixes relative to the prior revision:
///  - `operator==` no longer reads one block past the end when `size()` is 0
///    or an exact multiple of the block width (the partial-block comparison
///    is now skipped when there is no partial block).
///  - `operator[]` asserts `i < size()` rather than `i <= size()`, which
///    incorrectly permitted dereferencing the past-the-end position.
template <ptrdiff_t Extent = dynamic_extent>
class BitVec {
  using Storage = internal_bitvec::BitVecStorage<Extent>;
 public:
  using Block = internal_bitvec::Block;
  using value_type = bool;
  using difference_type = ptrdiff_t;
  using size_type = ptrdiff_t;
  using reference = BitRef<Block>;
  using const_reference = BitRef<const Block>;
  using iterator = BitIterator<Block>;
  using const_iterator = BitIterator<const Block>;
  static constexpr ptrdiff_t static_extent = Extent;
  static constexpr ptrdiff_t static_block_extent =
      Extent == dynamic_extent ? dynamic_extent
                               : BitVectorSizeInBlocks<Block>(Extent);
  using ExtentType = typename Storage::ExtentType;
  using BlockExtentType = typename Storage::BlockExtentType;
  /// Constructs an empty (dynamic) or zero-filled (static) vector.
  BitVec() : BitVec(ExtentType{}) {}
  /// Constructs from a braced list / array of bools.
  template <ptrdiff_t OtherExtent,
            typename = std::enable_if_t<(OtherExtent == Extent ||
                                         Extent == dynamic_extent)> >
  BitVec(const bool (&arr)[OtherExtent])
      : storage_(std::integral_constant<ptrdiff_t, OtherExtent>{}) {
    std::copy(arr, arr + OtherExtent, begin());
  }
  /// Constructs a copy of the bits referenced by `other`.
  template <typename OtherBlock, ptrdiff_t OtherExtent,
            typename = std::enable_if_t<(OtherExtent == Extent ||
                                         Extent == dynamic_extent)> >
  explicit BitVec(BitSpan<OtherBlock, OtherExtent> other)
      : storage_(other.size()) {
    this->bit_span().DeepAssign(other);
  }
  /// Converting copy constructor from a compatible-extent BitVec.
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              Extent == dynamic_extent)>* = nullptr>
  BitVec(const BitVec<OtherExtent>& other) : storage_(other.size()) {
    this->bit_span().DeepAssign(other.bit_span());
  }
  /// Constructs a vector of `extent` bits all equal to `value`.
  explicit BitVec(ExtentType extent, bool value = false) : storage_(extent) {
    fill(value);
  }
  /// Resizes to `new_size` bits; new bits (if any) are set to `value`.
  void resize(ExtentType new_size, bool value = false) {
    storage_.resize(new_size, value);
  }
  /// Sets every bit (including unused bits of the last block) to `value`.
  void fill(bool value) {
    std::memset(
        storage_.data(),
        value ? ~static_cast<unsigned char>(0) : static_cast<unsigned char>(0),
        storage_.num_blocks() * sizeof(Block));
  }
  /// Views of the underlying block storage.
  tensorstore::span<const Block, static_block_extent> blocks() const {
    return {storage_.data(), storage_.num_blocks()};
  }
  tensorstore::span<Block, static_block_extent> blocks() {
    return {storage_.data(), storage_.num_blocks()};
  }
  ExtentType size() const { return storage_.size(); }
  bool empty() const { return size() == 0; }
  /// Implicit conversions to (const) BitSpan over the stored bits.
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              OtherExtent == dynamic_extent)>* = nullptr>
  operator BitSpan<const Block, OtherExtent>() const {
    return {storage_.data(), 0, size()};
  }
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              OtherExtent == dynamic_extent)>* = nullptr>
  operator BitSpan<Block, OtherExtent>() {
    return {storage_.data(), 0, size()};
  }
  BitSpan<const Block, Extent> bit_span() const { return *this; }
  BitSpan<Block, Extent> bit_span() { return *this; }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> begin() const {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> cbegin() const {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> end() const {
    return {storage_.data(), storage_.size()};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> cend() const {
    return {storage_.data(), storage_.size()};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<Block> begin() {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<Block> end() {
    return {storage_.data(), storage_.size()};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitRef<const Block> operator[](
      ptrdiff_t i) const {
    // Bound fixed from `i <= size()`: `i == size()` would dereference the
    // past-the-end position.
    return assert(i >= 0 && i < size()), *(begin() + i);
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitRef<Block> operator[](ptrdiff_t i) {
    return assert(i >= 0 && i < size()), *(begin() + i);
  }
  /// Compares sizes and bit contents; unused bits of the final partial block
  /// are masked out of the comparison.
  friend bool operator==(const BitVec& a, const BitVec& b) {
    const ptrdiff_t size = a.size();
    if (size != b.size()) return false;
    const ptrdiff_t full_blocks = size / (sizeof(Block) * 8);
    const ptrdiff_t partial_bits = size % (sizeof(Block) * 8);
    const Block* a_data = a.storage_.data();
    const Block* b_data = b.storage_.data();
    if (!std::equal(a_data, a_data + full_blocks, b_data)) {
      return false;
    }
    // When the size is an exact multiple of the block width (including 0),
    // there is no partial block; reading `a_data[full_blocks]` in that case
    // would be out of bounds.
    if (partial_bits == 0) return true;
    const Block final_mask = (static_cast<Block>(1) << partial_bits) - 1;
    return (a_data[full_blocks] & final_mask) ==
           (b_data[full_blocks] & final_mask);
  }
  friend bool operator!=(const BitVec& a, const BitVec& b) { return !(a == b); }
 private:
  Storage storage_;
};
// Deduction guides: deduce the static extent from a bool-array initializer or
// from the extent of a BitSpan.
template <ptrdiff_t Extent>
BitVec(const bool (&arr)[Extent]) -> BitVec<Extent>;
template <typename Block, ptrdiff_t Extent>
BitVec(BitSpan<Block, Extent>) -> BitVec<Extent>;
}
#endif | #include "tensorstore/util/bit_vec.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::BitSpan;
using ::tensorstore::BitVec;
static_assert(!std::is_convertible_v<BitSpan<uint64_t, 3>, BitVec<>>);
static_assert(std::is_constructible_v<BitVec<3>, BitSpan<uint32_t, 3>>);
static_assert(std::is_constructible_v<BitVec<>, BitSpan<uint32_t, 3>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitVec<>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitSpan<uint32_t>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitSpan<uint32_t, 4>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitVec<4>>);
// Construction and element access: static/dynamic default construction,
// fill-construction, resize of an empty vector, and proxy-reference
// subscripting (both read and write paths).
TEST(BitVecTest, StaticDefaultConstruct) {
  BitVec<9> v;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 0, 0, 0, 0, 0, 0));
}
TEST(BitVecTest, StaticConstructTrue) {
  BitVec<9> v({}, true);
  EXPECT_THAT(v, ::testing::ElementsAre(1, 1, 1, 1, 1, 1, 1, 1, 1));
}
TEST(BitVecTest, DynamicDefaultConstruct) {
  BitVec<> v;
  EXPECT_EQ(0, v.size());
  EXPECT_TRUE(v.empty());
  v.resize(65);
  EXPECT_FALSE(v.empty());
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, false)));
  v.fill(true);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, true)));
}
TEST(BitVecTest, DynamicConstructFalse) {
  BitVec<> v(65);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, false)));
}
TEST(BitVecTest, Subscript) {
  BitVec<> v(9);
  const auto& v_ref = v;
  EXPECT_FALSE(v_ref[3]);
  v[3] = true;
  EXPECT_TRUE(v_ref[3]);
  v[5] = true;
  v[6] = true;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 1, 0, 1, 1, 0, 0));
  v[8] = true;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 1, 0, 1, 1, 0, 1));
  v[3] = false;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 0, 0, 1, 1, 0, 1));
}
// Copy/move construction and copy assignment.  "Inline" cases (<= one block)
// exercise the small-vector storage; "Large" cases (> 64 bits) exercise
// heap-backed storage, including assignment between vectors whose block
// counts differ.
TEST(BitVecTest, CopyConstructInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(a);
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
TEST(BitVecTest, CopyConstructLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(a);
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
TEST(BitVecTest, MoveConstructInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(std::move(a));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
TEST(BitVecTest, MoveConstructLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(std::move(a));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
TEST(BitVecTest, CopyAssignInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(9);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
TEST(BitVecTest, CopyAssignLargeSameNumBlocks) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(129);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
TEST(BitVecTest, CopyAssignLargeDifferentNumBlocks) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(65);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Move assignment, braced-list construction, class-template argument
// deduction (from bool array and from BitSpan), and construction from a
// BitSpan view of another vector.
TEST(BitVecTest, MoveAssignInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(9);
  b = std::move(a);
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
TEST(BitVecTest, MoveAssignLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(129);
  b = std::move(a);
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
TEST(BitVecTest, BracedListConstruct) {
  BitVec<> a({1, 0, 0, 1, 1});
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 1));
}
TEST(BitVecTest, DeduceBitVec) {
  auto a = BitVec({true, false, false, true, true});
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 1));
  static_assert(std::is_same_v<decltype(a), BitVec<5>>);
  auto b = BitVec(a.bit_span());
  static_assert(std::is_same_v<decltype(b), BitVec<5>>);
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 1));
}
TEST(BitVecTest, BitSpanConstruct) {
  BitVec<> a(37);
  a[32] = 1;
  a[17] = 1;
  a[2] = 1;
  EXPECT_THAT(a, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
  BitVec<> b(a.bit_span());
  EXPECT_THAT(b, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
}
// Static-to-dynamic converting construction, and equality comparison for
// both inline (18-bit) and heap-backed (150-bit) vectors, including the
// effect of size mismatches and single-bit differences near each end.
TEST(BitVecTest, BitVecConvertConstruct) {
  BitVec<37> a;
  a[32] = 1;
  a[17] = 1;
  a[2] = 1;
  EXPECT_THAT(a, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
  BitVec<> b = a;
  EXPECT_THAT(b, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
}
TEST(BitVecTest, ComparisonShort) {
  BitVec<> a(18);
  BitVec<> b(17);
  EXPECT_NE(a, b);
  b.resize(18);
  EXPECT_EQ(a, b);
  b[2] = true;
  EXPECT_NE(a, b);
  a[2] = true;
  EXPECT_EQ(a, b);
  a[17] = true;
  EXPECT_NE(a, b);
  b[17] = true;
  EXPECT_EQ(a, b);
}
TEST(BitVecTest, ComparisonLong) {
  BitVec<> a(150);
  BitVec<> b(151);
  EXPECT_NE(a, b);
  b.resize(150);
  EXPECT_EQ(a, b);
  b[2] = true;
  EXPECT_NE(a, b);
  a[2] = true;
  EXPECT_EQ(a, b);
  a[149] = true;
  EXPECT_NE(a, b);
  b[149] = true;
  EXPECT_EQ(a, b);
}
// Iterator coverage: const iteration via begin()/end() and cbegin()/cend(),
// non-const read iteration, and writing through non-const iterators.
TEST(BitVecTest, ConstIterators) {
  BitVec<> a(7);
  a[1] = 1;
  a[4] = 1;
  {
    const auto& a_ref = a;
    std::vector<bool> b(a_ref.begin(), a_ref.end());
    EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
  }
  {
    std::vector<bool> b(a.cbegin(), a.cend());
    EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
  }
}
TEST(BitVecTest, NonConstIterators) {
  BitVec<> a(7);
  a[1] = 1;
  a[4] = 1;
  std::vector<bool> b(a.begin(), a.end());
  EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
}
TEST(BitVecTest, NonConstIteratorsMutate) {
  BitVec<> a(7);
  std::vector<bool> b{0, 1, 0, 0, 1, 0, 0};
  std::copy(b.begin(), b.end(), a.begin());
  EXPECT_THAT(a, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
}
// Verifies the raw block view: bits map to the expected positions within the
// 64-bit blocks, for a single-block and a two-block vector.  Also checks
// that a static-extent resize to the same extent is a no-op.
TEST(BitVecTest, BlocksInline) {
  BitVec<> a(64);
  for (int i : {0, 5, 17, 62}) {
    a[i] = true;
  }
  EXPECT_THAT(a.blocks(), ::testing::ElementsAre(
                              (uint64_t(1) << 0) |
                              (uint64_t(1) << 5) |
                              (uint64_t(1) << 17) |
                              (uint64_t(1) << 62)));
}
TEST(BitVecTest, BlocksLarge) {
  BitVec<> a(128);
  for (int i : {0, 5, 17, 62, 90, 127}) {
    a[i] = true;
  }
  EXPECT_THAT(a.blocks(),
              ::testing::ElementsAre(
                  (uint64_t(1) << 0) |
                  (uint64_t(1) << 5) |
                  (uint64_t(1) << 17) |
                  (uint64_t(1) << 62),
                  (uint64_t(1) << (90 - 64)) |
                  (uint64_t(1) << (127 - 64))));
}
TEST(BitVecTest, ResizeStatic) {
  BitVec<65> b;
  std::vector<bool> expected(65);
  for (int i : {0, 3, 7, 29, 35, 64}) {
    expected[i] = true;
    b[i] = true;
  }
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
  b.resize(std::integral_constant<std::ptrdiff_t, 65>{});
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Checks that resizing a dynamically-sized BitVec from `orig_size` to
// `new_size` matches the behavior of std::vector<bool>::resize, for both
// fill values.  `bits` lists the indices initially set to true.
void TestResizeDynamic(std::ptrdiff_t orig_size, std::ptrdiff_t new_size,
                       std::vector<int> bits) {
  SCOPED_TRACE(tensorstore::StrCat("orig_size=", orig_size,
                                   ", new_size=", new_size,
                                   ", bits=", ::testing::PrintToString(bits)));
  BitVec<> b(orig_size);
  std::vector<bool> expected(orig_size);
  for (int i : bits) {
    expected[i] = true;
    b[i] = true;
  }
  // Reference results computed with std::vector<bool>.
  std::vector<bool> expected_resize_false = expected;
  expected_resize_false.resize(new_size, false);
  std::vector<bool> expected_resize_true = expected;
  expected_resize_true.resize(new_size, true);
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
  BitVec<> b_resize_false = b;
  b_resize_false.resize(new_size, false);
  BitVec<> b_resize_true = b;
  b_resize_true.resize(new_size, true);
  EXPECT_THAT(b_resize_false,
              ::testing::ElementsAreArray(expected_resize_false));
  EXPECT_THAT(b_resize_true, ::testing::ElementsAreArray(expected_resize_true));
}
// The cases below exercise resize across the inline-storage threshold and
// across 64-bit block-count boundaries.
TEST(BitVecTest, ResizeDynamicLargeNoOp) {
  TestResizeDynamic(65, 65, {0, 3, 7, 29, 35, 64});
}
TEST(BitVecTest, ResizeDynamicInlineNoOp) {
  TestResizeDynamic(62, 62, {0, 3, 7, 29, 35, 61});
}
TEST(BitVecTest, ResizeDynamicInlineShrink) {
  TestResizeDynamic(62, 30, {0, 3, 7, 29, 35, 61});
}
TEST(BitVecTest, ResizeDynamicInlineExpand) {
  TestResizeDynamic(36, 41, {0, 3, 7, 29, 35});
}
TEST(BitVecTest, ResizeDynamicShrinkSameNumBlocks) {
  TestResizeDynamic(150, 132, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicExpandSameNumBlocks) {
  TestResizeDynamic(150, 160, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicShrinkDifferentNumBlocks) {
  TestResizeDynamic(150, 128, {0, 3, 7, 29, 35, 64, 127, 131, 149});
  TestResizeDynamic(150, 126, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicExpandDifferentNumBlocks) {
  TestResizeDynamic(150, 250, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
// Degenerate sizes: growing from and shrinking to an empty vector.
TEST(BitVecTest, ResizeDynamicExpandFromEmpty) {
  TestResizeDynamic(0, 15, {});
  TestResizeDynamic(0, 65, {});
  TestResizeDynamic(0, 150, {});
  TestResizeDynamic(0, 0, {});
}
TEST(BitVecTest, ResizeDynamicShrinkToEmpty) {
  TestResizeDynamic(13, 0, {1, 2, 12});
  TestResizeDynamic(129, 0, {1, 2, 12, 65, 73, 128});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_vec.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_vec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
812071a9-cc21-46f8-accc-2f96d4671015 | cpp | google/tensorstore | small_bit_set | tensorstore/util/small_bit_set.h | tensorstore/util/small_bit_set_test.cc | #ifndef TENSORSTORE_UTIL_SMALL_BIT_SET_H_
#define TENSORSTORE_UTIL_SMALL_BIT_SET_H_
#include <stddef.h>
#include <cassert>
#include <iterator>
#include <ostream>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/numeric/bits.h"
#include "tensorstore/internal/integer_types.h"
namespace tensorstore {
// Proxy reference to a single bit within a block of unsigned integer type
// `T`.  Assignment through the proxy sets or clears the referenced bit.
template <typename T>
class BitRef {
  static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");

 public:
  friend class BitRef<const T>;
  using block_type = T;
  using value_type = bool;
  using element_type = bool;
  constexpr static ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
  // `block` must point at the block containing the bit; only
  // `offset % kBitsPerBlock` is used to form the mask.
  constexpr BitRef(T* block ABSL_ATTRIBUTE_LIFETIME_BOUND, ptrdiff_t offset)
      : block_(block), mask_(static_cast<T>(1) << (offset % kBitsPerBlock)) {
    assert(offset >= 0);
  }
  // Reads the referenced bit.
  constexpr operator bool() const { return *block_ & mask_; }
  // Writes the referenced bit.  const-qualified because the proxy itself is
  // logically immutable; only the pointee block is modified.
  const BitRef& operator=(bool value) const {
    *block_ = value ? (*block_ | mask_) : (*block_ & ~mask_);
    return *this;
  }
  // Copies the bit value (not the reference) from another proxy.
  const BitRef& operator=(BitRef value) const {
    return (*this = static_cast<bool>(value));
  }
  // Swaps a referenced bit with a plain bool, in either argument order.
  friend void swap(BitRef a, bool& x) {
    bool temp = a;
    a = x;
    x = temp;
  }
  friend void swap(bool& x, BitRef a) {
    bool temp = a;
    a = x;
    x = temp;
  }

 private:
  T* block_;
  T mask_;
};
/// Swaps the bits referenced by `a` and `b`, possibly stored in blocks of
/// different unsigned types.  Participates in overload resolution only when
/// both storage types are mutable (non-const).
template <typename T, typename U>
std::enable_if_t<(!std::is_const_v<T> && !std::is_const_v<U>)> swap(
    BitRef<T> a, BitRef<U> b) {
  const bool a_bit = a;
  const bool b_bit = b;
  a = b_bit;
  b = a_bit;
}
/// Same-type overload (presumably so this swap, rather than a generic one,
/// is selected for identical proxy types — confirm against call sites).
template <typename T>
std::enable_if_t<(!std::is_const_v<T>)> swap(BitRef<T> a, BitRef<T> b) {
  const bool a_bit = a;
  const bool b_bit = b;
  a = b_bit;
  b = a_bit;
}
// Random-access iterator over the bits of an array of unsigned integer
// blocks of type `T`.  Dereferencing yields a `BitRef<T>` proxy; a
// non-const iterator implicitly converts to the corresponding const
// iterator.
template <typename T>
class BitIterator {
  static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");

 public:
  using pointer = BitIterator<T>;
  using const_pointer = BitIterator<const T>;
  using reference = BitRef<T>;
  using const_reference = BitRef<const T>;
  using difference_type = ptrdiff_t;
  using value_type = bool;
  using iterator_category = std::random_access_iterator_tag;
  constexpr static ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
  constexpr BitIterator() : base_(nullptr), offset_(0) {}
  // Iterator referring to bit `offset` (counted across blocks) of the block
  // array starting at `base`.
  constexpr BitIterator(T* base ABSL_ATTRIBUTE_LIFETIME_BOUND, ptrdiff_t offset)
      : base_(base), offset_(offset) {}
  // Implicit conversion from `BitIterator<U>` only when `const U == T`,
  // i.e. non-const -> const.
  template <typename U, std::enable_if_t<std::is_same_v<const U, T>>* = nullptr>
  constexpr BitIterator(BitIterator<U> other)
      : base_(other.base()), offset_(other.offset()) {}
  constexpr T* base() const { return base_; }
  constexpr ptrdiff_t offset() const { return offset_; }
  // Proxy to the referenced bit; selects the containing block here.
  constexpr BitRef<T> operator*() const {
    return BitRef<T>(base() + offset() / kBitsPerBlock, offset());
  }
  constexpr BitRef<T> operator[](ptrdiff_t offset) const {
    return *(*this + offset);
  }
  BitIterator& operator++() {
    ++offset_;
    return *this;
  }
  BitIterator& operator--() {
    --offset_;
    return *this;
  }
  BitIterator operator++(int) {
    BitIterator temp = *this;
    ++offset_;
    return temp;
  }
  BitIterator operator--(int) {
    BitIterator temp = *this;
    --offset_;
    return temp;
  }
  friend BitIterator operator+(BitIterator it, ptrdiff_t offset) {
    it += offset;
    return it;
  }
  friend BitIterator operator+(ptrdiff_t offset, BitIterator it) {
    it += offset;
    return it;
  }
  BitIterator& operator+=(ptrdiff_t x) {
    offset_ += x;
    return *this;
  }
  friend BitIterator operator-(BitIterator it, ptrdiff_t offset) {
    it -= offset;
    return it;
  }
  BitIterator& operator-=(ptrdiff_t x) {
    offset_ -= x;
    return *this;
  }
  // Difference and comparisons are defined only for iterators with the same
  // `base()` (checked by assert in debug builds).
  friend constexpr ptrdiff_t operator-(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() - b.offset();
  }
  friend constexpr bool operator==(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() == b.offset();
  }
  friend constexpr bool operator!=(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() != b.offset();
  }
  friend constexpr bool operator<(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() < b.offset();
  }
  friend constexpr bool operator<=(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() <= b.offset();
  }
  friend constexpr bool operator>(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() > b.offset();
  }
  friend constexpr bool operator>=(BitIterator a, BitIterator b) {
    assert(a.base() == b.base());
    return a.offset() >= b.offset();
  }

 private:
  T* base_;
  ptrdiff_t offset_;
};
namespace bitset_impl {
// View of the first `N` bits reachable from iterator `it`, as a range of
// bool (proxy) values.
template <typename Iterator, size_t N>
class BoolsView {
 public:
  using iterator = Iterator;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using reference = typename iterator::reference;
  explicit BoolsView(iterator it) : it_(std::move(it)) {}
  constexpr iterator begin() const { return it_; }
  constexpr iterator end() const { return iterator(it_.base(), N); }

 private:
  iterator it_;
};
// Forward iterator over the positions of the 1-bits of an unsigned value,
// from least to most significant; the default-constructed (value 0)
// iterator is the end sentinel.
template <typename Uint>
class OneBitsIterator {
 public:
  using value_type = int;
  using difference_type = int;
  using reference = int;
  OneBitsIterator() : value_(0) {}
  explicit OneBitsIterator(Uint value) : value_(value) {}
  friend constexpr bool operator==(OneBitsIterator a, OneBitsIterator b) {
    return a.value_ == b.value_;
  }
  friend constexpr bool operator!=(OneBitsIterator a, OneBitsIterator b) {
    return !(a == b);
  }
  // Position of the current (lowest remaining) 1-bit.
  constexpr int operator*() const { return absl::countr_zero(value_); }
  constexpr OneBitsIterator& operator++() {
    // `value_ & -value_` isolates the lowest set bit; XOR clears it.
    Uint t = value_ & -value_;
    value_ ^= t;
    return *this;
  }
  constexpr OneBitsIterator operator++(int) {
    auto copy = *this;
    ++*this;
    return copy;
  }

 private:
  Uint value_;
};
// Range adaptor exposing the set-bit positions of `bits` in increasing
// order.
template <typename Uint>
class IndexView {
 public:
  IndexView(Uint bits) : bits_(bits) {}
  using const_iterator = OneBitsIterator<Uint>;
  using value_type = typename const_iterator::value_type;
  using difference_type = typename const_iterator::difference_type;
  using reference = typename const_iterator::reference;
  constexpr const_iterator begin() const { return const_iterator(bits_); }
  constexpr const_iterator end() const { return const_iterator(); }
  // Lowest set-bit position; with no bits set this yields the bit width
  // (absl::countr_zero semantics).
  constexpr int front() const { return *begin(); }

 private:
  Uint bits_;
};
}  // namespace bitset_impl
/// Fixed-size set of `N` bits stored in the smallest unsigned integer type
/// with at least `N` bits (`internal::uint_type<N>`).  Supports bitwise
/// algebra, std::bitset-style mutators, per-bit proxy access, and views
/// over the bool values / set-bit indices.
template <size_t N>
class SmallBitSet {
 public:
  using Uint = typename internal::uint_type<N>::type;
  using value_type = bool;
  using reference = BitRef<Uint>;
  // All bits clear.
  constexpr SmallBitSet() : bits_(0) {}
  // Broadcast constructor: every bit of the underlying integer set to
  // `value`.  Restricted to exactly `bool` to avoid implicit integer
  // conversions.
  template <typename T,
            typename = std::enable_if_t<std::is_same_v<T, bool>>>
  constexpr SmallBitSet(T value) : bits_(value * ~Uint(0)) {}
  // Constructs from the raw bit pattern.
  static constexpr SmallBitSet FromUint(Uint bits) {
    SmallBitSet v;
    v.bits_ = bits;
    return v;
  }
  // Constructs with the listed bit positions set.
  template <size_t NumBits, typename = std::enable_if_t<(NumBits <= N)>>
  static constexpr SmallBitSet FromIndices(const int (&positions)[NumBits]) {
    return FromIndexRange(std::begin(positions), std::end(positions));
  }
  template <typename Range>
  static constexpr SmallBitSet FromIndexRange(Range&& range) {
    return FromIndexRange(range.begin(), range.end());
  }
  template <typename Iterator>
  static constexpr SmallBitSet FromIndexRange(Iterator begin, Iterator end) {
    SmallBitSet set;
    while (begin != end) set.set(*begin++);
    return set;
  }
  // Constructs from a sequence of bools, one per bit starting at bit 0.
  template <size_t NumBits, typename = std::enable_if_t<(NumBits <= N)>>
  static constexpr SmallBitSet FromBools(const bool (&bits)[NumBits]) {
    return FromBoolRange(std::begin(bits), std::end(bits));
  }
  template <typename Range>
  static constexpr SmallBitSet FromBoolRange(Range&& range) {
    return FromBoolRange(range.begin(), range.end());
  }
  template <typename Iterator>
  static constexpr SmallBitSet FromBoolRange(Iterator begin, Iterator end) {
    SmallBitSet set;
    size_t i = 0;
    while (begin != end) {
      set.bits_ |= (*begin++ ? Uint(1) : Uint(0)) << i;
      i++;
    }
    // NOTE(review): the length check happens only after the loop; a range
    // longer than N shifts by >= bit-width (undefined behavior) before this
    // assert can fire in debug builds.  Callers must pass at most N values.
    assert(i <= N);
    return set;
  }
  // Set containing exactly the bits [0, k).
  static constexpr SmallBitSet UpTo(size_t k) {
    assert(k <= N);
    return k == 0 ? SmallBitSet()
                  : SmallBitSet::FromUint(~Uint(0) << (N - k) >> (N - k));
  }
  // Broadcast assignment, mirroring the broadcast constructor.
  template <typename T,
            typename = std::enable_if_t<std::is_same_v<T, bool>>>
  constexpr SmallBitSet& operator=(T value) {
    bits_ = ~Uint(0) * value;
    return *this;
  }
  // Mutable / immutable views of the bits as a range of `N` bools.
  using BoolsView = bitset_impl::BoolsView<BitIterator<Uint>, N>;
  constexpr BoolsView bools_view() ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return BoolsView(BitIterator<Uint>(&bits_, 0));
  }
  using ConstBoolsView = bitset_impl::BoolsView<BitIterator<const Uint>, N>;
  constexpr ConstBoolsView bools_view() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return ConstBoolsView(BitIterator<const Uint>(&bits_, 0));
  }
  // View of the set-bit positions, in increasing order.
  using IndexView = bitset_impl::IndexView<Uint>;
  constexpr IndexView index_view() const { return IndexView(bits_); }
  constexpr static size_t size() { return N; }
  // Number of set bits.
  constexpr size_t count() const { return absl::popcount(bits_); }
  constexpr bool none() const { return bits_ == 0; }
  constexpr bool any() const { return bits_ != 0; }
  // True iff every bit of the underlying integer is set (as produced by the
  // broadcast constructor / set()).
  constexpr bool all() const { return bits_ == ~Uint(0); }
  explicit operator bool() const { return any(); }
  // std::bitset-style whole-set mutators.
  constexpr SmallBitSet& set() noexcept {
    bits_ = ~Uint(0);
    return *this;
  }
  constexpr SmallBitSet& reset() noexcept {
    bits_ = 0;
    return *this;
  }
  constexpr SmallBitSet& flip() noexcept {
    bits_ = ~bits_;
    return *this;
  }
  // Per-bit accessors; `pos` must be in [0, N).
  constexpr bool test(int pos) const noexcept {
    assert(pos >= 0 && pos < N);
    return (bits_ >> pos) & 1;
  }
  constexpr SmallBitSet& set(int pos) noexcept {
    assert(pos >= 0 && pos < N);
    bits_ |= (static_cast<Uint>(1) << pos);
    return *this;
  }
  constexpr SmallBitSet& reset(int pos) noexcept {
    assert(pos >= 0 && pos < N);
    bits_ &= ~(static_cast<Uint>(1) << pos);
    return *this;
  }
  constexpr SmallBitSet& flip(int pos) noexcept {
    assert(pos >= 0 && pos < N);
    bits_ ^= (static_cast<Uint>(1) << pos);
    return *this;
  }
  // Proxy reference to bit `offset` (the `offset >= 0` check is vacuous for
  // an unsigned argument).
  constexpr reference operator[](size_t offset) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    assert(offset >= 0 && offset < N);
    return reference(&bits_, offset);
  }
  constexpr bool operator[](size_t offset) const {
    assert(offset >= 0 && offset < N);
    return test(offset);
  }
  // Raw bit pattern.
  constexpr Uint to_uint() const { return bits_; }
  // Bitwise algebra on whole sets.
  friend constexpr SmallBitSet operator~(SmallBitSet v) {
    return SmallBitSet::FromUint(~v.bits_);
  }
  friend constexpr SmallBitSet operator&(SmallBitSet a, SmallBitSet b) {
    return SmallBitSet::FromUint(a.bits_ & b.bits_);
  }
  friend constexpr SmallBitSet& operator&=(SmallBitSet& a, SmallBitSet b) {
    a.bits_ &= b.bits_;
    return a;
  }
  friend constexpr SmallBitSet operator^(SmallBitSet a, SmallBitSet b) {
    return SmallBitSet::FromUint(a.bits_ ^ b.bits_);
  }
  friend constexpr SmallBitSet& operator^=(SmallBitSet& a, SmallBitSet b) {
    a.bits_ ^= b.bits_;
    return a;
  }
  friend constexpr SmallBitSet operator|(SmallBitSet a, SmallBitSet b) {
    return SmallBitSet::FromUint(a.bits_ | b.bits_);
  }
  friend constexpr SmallBitSet& operator|=(SmallBitSet& a, SmallBitSet b) {
    a.bits_ |= b.bits_;
    return a;
  }
  friend constexpr bool operator==(SmallBitSet a, SmallBitSet b) {
    return a.bits_ == b.bits_;
  }
  friend constexpr bool operator!=(SmallBitSet a, SmallBitSet b) {
    return !(a == b);
  }
  // Prints `N` characters, bit 0 first.
  friend std::ostream& operator<<(std::ostream& os, SmallBitSet v) {
    for (size_t i = 0; i < N; ++i) {
      os << (static_cast<bool>(v[i]) ? '1' : '0');
    }
    return os;
  }

 private:
  Uint bits_;
};
}
#endif | #include "tensorstore/util/small_bit_set.h"
#include <stdint.h>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::BitIterator;
using ::tensorstore::BitRef;
using BitSet = ::tensorstore::SmallBitSet<32>;
// Non-const -> const iterator conversion is one-way.
static_assert(
    std::is_convertible_v<BitIterator<uint32_t>, BitIterator<const uint32_t>>);
static_assert(
    !std::is_convertible_v<BitIterator<const uint32_t>, BitIterator<uint32_t>>);
// Reads/writes through BitRef proxies, including bit 19 mapping to bit 3 of
// the second uint16_t block, and assignment from a const proxy.
TEST(BitRefTest, Basic) {
  uint16_t data[2] = {0, 0};
  BitRef<uint16_t> ref(data + 1, 19);
  BitRef<uint16_t> ref2(data, 2);
  BitRef<const uint16_t> const_ref(data, 3);
  EXPECT_EQ(false, ref);
  ref = true;
  EXPECT_EQ(true, ref);
  EXPECT_THAT(data, ::testing::ElementsAre(0, 8));
  data[0] = 0xffff;
  data[1] = 0xffff;
  EXPECT_EQ(true, ref);
  ref = false;
  EXPECT_EQ(false, ref);
  EXPECT_THAT(data, ::testing::ElementsAre(0xffff,
                                           0xfff7));
  ref = ref2;
  EXPECT_THAT(data, ::testing::ElementsAre(0xffff,
                                           0xffff));
  data[0] = 0;
  ref = const_ref;
  EXPECT_THAT(data, ::testing::ElementsAre(0, 0xfff7));
}
// swap() between two proxies, between a proxy and a bool (both orders), and
// between proxies over different block types.
TEST(BitRefTest, Swap) {
  uint16_t data[2] = {0, 0};
  BitRef<uint16_t> ref(data + 1, 19);
  BitRef<uint16_t> ref2(data, 2);
  uint32_t data2 = 0;
  ref = true;
  ref2 = false;
  EXPECT_THAT(data, ::testing::ElementsAre(0, 8));
  using std::swap;
  swap(ref, ref2);
  EXPECT_EQ(false, ref);
  EXPECT_EQ(true, ref2);
  EXPECT_THAT(data, ::testing::ElementsAre(4, 0));
  bool b = false;
  swap(b, ref2);
  EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
  EXPECT_EQ(true, b);
  swap(ref2, b);
  EXPECT_THAT(data, ::testing::ElementsAre(4, 0));
  EXPECT_EQ(false, b);
  BitRef<uint32_t> ref3(&data2, 1);
  swap(ref2, ref3);
  EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
  EXPECT_EQ(2, data2);
}
// base()/offset() accessors, dereference/subscript proxies, and const
// iterators observing mutations made through the underlying storage.
TEST(BitIteratorTest, Basic) {
  uint16_t data[2] = {0, 0};
  BitIterator<uint16_t> it(data, 19);
  BitIterator<uint16_t> it2(data, 2);
  BitIterator<const uint16_t> const_it(data, 3);
  BitIterator<const uint16_t> const_it2 = it;
  EXPECT_EQ(data, it.base());
  EXPECT_EQ(data, it2.base());
  EXPECT_EQ(data, const_it.base());
  EXPECT_EQ(data, const_it2.base());
  EXPECT_EQ(19, it.offset());
  EXPECT_EQ(2, it2.offset());
  EXPECT_EQ(3, const_it.offset());
  EXPECT_EQ(19, const_it2.offset());
  {
    auto ref = *it;
    static_assert(std::is_same_v<BitRef<uint16_t>, decltype(ref)>);
    auto ref_subscript = it[0];
    auto ref_subscript2 = it2[17];
    static_assert(std::is_same_v<BitRef<uint16_t>, decltype(ref_subscript)>);
    EXPECT_FALSE(ref_subscript);
    EXPECT_FALSE(ref_subscript2);
    ref = true;
    EXPECT_TRUE(ref);
    EXPECT_TRUE(ref_subscript);
    EXPECT_TRUE(ref_subscript2);
    EXPECT_THAT(data, ::testing::ElementsAre(0, 0x8));
    ref = false;
    EXPECT_FALSE(ref);
    EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
    data[1] = ~0x8;
    EXPECT_FALSE(ref);
    ref = true;
    EXPECT_TRUE(ref);
    EXPECT_THAT(data, ::testing::ElementsAre(0, 0xffff));
  }
  {
    auto ref = *const_it;
    static_assert(std::is_same_v<BitRef<const uint16_t>, decltype(ref)>);
    EXPECT_FALSE(ref);
    data[0] = 0x8;
    EXPECT_TRUE(ref);
    data[0] = ~data[0];
    EXPECT_FALSE(ref);
  }
}
// Iterator arithmetic operates on the bit offset and preserves base().
TEST(BitIteratorTest, IteratorPlusOffset) {
  uint16_t data[2] = {0, 0};
  auto it = BitIterator<uint16_t>(data, 3) + 5;
  EXPECT_EQ(data, it.base());
  EXPECT_EQ(8, it.offset());
}
TEST(BitIteratorTest, OffsetPlusIterator) {
  uint16_t data[2] = {0, 0};
  auto it = 5 + BitIterator<uint16_t>(data, 3);
  EXPECT_EQ(data, it.base());
  EXPECT_EQ(8, it.offset());
}
TEST(BitIteratorTest, IteratorMinusOffset) {
  uint16_t data[2] = {0, 0};
  auto it = BitIterator<uint16_t>(data, 7) - 2;
  EXPECT_EQ(data, it.base());
  EXPECT_EQ(5, it.offset());
}
// Difference works across all const/non-const combinations.
TEST(BitIteratorTest, IteratorMinusIterator) {
  uint16_t data[2] = {0, 0};
  EXPECT_EQ(3, BitIterator<uint16_t>(data, 7) - BitIterator<uint16_t>(data, 4));
  EXPECT_EQ(
      3, BitIterator<uint16_t>(data, 7) - BitIterator<const uint16_t>(data, 4));
  EXPECT_EQ(
      3, BitIterator<const uint16_t>(data, 7) - BitIterator<uint16_t>(data, 4));
  EXPECT_EQ(3, BitIterator<const uint16_t>(data, 7) -
                   BitIterator<const uint16_t>(data, 4));
}
// Pre/post increment and decrement return the expected iterator values.
TEST(BitIteratorTest, PreIncrement) {
  uint16_t data[2] = {0, 0};
  BitIterator<uint16_t> it(data, 19);
  auto& x = ++it;
  EXPECT_EQ(&it, &x);
  EXPECT_EQ(20, it.offset());
  EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PreDecrement) {
  uint16_t data[2] = {0, 0};
  BitIterator<uint16_t> it(data, 19);
  auto& x = --it;
  EXPECT_EQ(&it, &x);
  EXPECT_EQ(18, it.offset());
  EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PostIncrement) {
  uint16_t data[2] = {0, 0};
  BitIterator<uint16_t> it(data, 19);
  EXPECT_EQ(BitIterator<uint16_t>(data, 19), it++);
  EXPECT_EQ(20, it.offset());
  EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PostDecrement) {
  uint16_t data[2] = {0, 0};
  BitIterator<uint16_t> it(data, 19);
  EXPECT_EQ(BitIterator<uint16_t>(data, 19), it--);
  EXPECT_EQ(18, it.offset());
  EXPECT_EQ(data, it.base());
}
// Relational operators compare offsets; mixed const/non-const comparisons
// are covered for == and <.
TEST(BitIteratorTest, Comparison) {
  uint16_t data[2] = {0, 0};
  EXPECT_EQ(BitIterator<uint16_t>(data, 3), BitIterator<uint16_t>(data, 3));
  EXPECT_EQ(BitIterator<uint16_t>(data, 3),
            BitIterator<const uint16_t>(data, 3));
  EXPECT_NE(BitIterator<uint16_t>(data, 3), BitIterator<uint16_t>(data, 4));
  EXPECT_NE(BitIterator<uint16_t>(data, 3),
            BitIterator<const uint16_t>(data, 4));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) < BitIterator<uint16_t>(data, 4));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <
              BitIterator<const uint16_t>(data, 4));
  EXPECT_FALSE(BitIterator<uint16_t>(data, 3) < BitIterator<uint16_t>(data, 3));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <= BitIterator<uint16_t>(data, 4));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <= BitIterator<uint16_t>(data, 3));
  EXPECT_FALSE(BitIterator<uint16_t>(data, 3) <=
               BitIterator<uint16_t>(data, 2));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) > BitIterator<uint16_t>(data, 2));
  EXPECT_FALSE(BitIterator<uint16_t>(data, 3) > BitIterator<uint16_t>(data, 3));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) >= BitIterator<uint16_t>(data, 2));
  EXPECT_TRUE(BitIterator<uint16_t>(data, 3) >= BitIterator<uint16_t>(data, 3));
  EXPECT_FALSE(BitIterator<uint16_t>(data, 2) >=
               BitIterator<uint16_t>(data, 3));
}
// Default-constructed set is empty; `true` broadcasts to all bits.
TEST(SmallBitSetTest, DefaultConstruct) {
  BitSet v;
  EXPECT_FALSE(v);
  EXPECT_EQ(0, v.to_uint());
  EXPECT_EQ(v, v);
  BitSet v_true = true;
  EXPECT_EQ(v_true, v_true);
  EXPECT_NE(v, v_true);
  EXPECT_THAT(v.bools_view(),
              ::testing::ElementsAreArray(std::vector<bool>(32)));
}
// FromUint round trip, per-bit reads, both views, stream formatting (bit 0
// first), and the full bitwise-operator algebra.
TEST(SmallBitSetTest, FromUint) {
  auto v = BitSet::FromUint(0b11'0111);
  EXPECT_TRUE(v);
  EXPECT_EQ(0b110111, v.to_uint());
  EXPECT_EQ(true, v[0]);
  EXPECT_EQ(true, v[1]);
  EXPECT_EQ(true, v[2]);
  EXPECT_EQ(false, v[3]);
  EXPECT_EQ(true, v[4]);
  EXPECT_EQ(true, v[5]);
  EXPECT_THAT(v.bools_view(), ::testing::ElementsAre(1, 1, 1, 0, 1, 1, 0, 0,
                                                     0, 0, 0, 0, 0, 0, 0, 0,
                                                     0, 0, 0, 0, 0, 0, 0, 0,
                                                     0, 0, 0, 0, 0, 0, 0, 0));
  EXPECT_THAT(const_cast<const BitSet&>(v).bools_view(),
              ::testing::ElementsAre(1, 1, 1, 0, 1, 1, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0));
  EXPECT_EQ(
      "11101100"
      "00000000"
      "00000000"
      "00000000",
      tensorstore::StrCat(v));
  EXPECT_EQ(0b11111111'11111111'11111111'11001000, (~v).to_uint());
  auto v1 = BitSet::FromUint(0b101'1100);
  EXPECT_EQ(0b111'1111, (v | v1).to_uint());
  EXPECT_EQ(0b001'0100, (v & v1).to_uint());
  EXPECT_EQ(0b110'1011, (v ^ v1).to_uint());
  auto v2 = v1;
  v2 |= v;
  EXPECT_EQ(0b111'1111, v2.to_uint());
  v2 = v1;
  v2 &= v;
  EXPECT_EQ(0b001'0100, v2.to_uint());
  v2 = v1;
  v2 ^= v;
  EXPECT_EQ(0b110'1011, v2.to_uint());
}
// FromBools: element i of the list becomes bit i.
TEST(SmallBitSetTest, BracedList) {
  auto v = BitSet::FromBools({0, 1, 1, 0, 0, 1});
  EXPECT_EQ(0b100110, v.to_uint());
}
// operator[] returns a writable proxy.
TEST(SmallBitSetTest, Reference) {
  BitSet v;
  v[2] = true;
  EXPECT_TRUE(v[2]);
  EXPECT_FALSE(v[0]);
  EXPECT_EQ(0b100, v.to_uint());
}
// UpTo(k) sets exactly the low k bits, including the k == 0 and k == N
// boundary cases.
TEST(SmallBitSetTest, UpTo) {
  EXPECT_EQ(0x00000000, BitSet::UpTo(0).to_uint());
  EXPECT_EQ(0x00000001, BitSet::UpTo(1).to_uint());
  EXPECT_EQ(0x0000ffff, BitSet::UpTo(16).to_uint());
  EXPECT_EQ(0x7fffffff, BitSet::UpTo(31).to_uint());
  EXPECT_EQ(0xffffffff, BitSet::UpTo(32).to_uint());
  EXPECT_EQ(1, BitSet::UpTo(1).count());
}
// FromIndices sets the listed positions; index_view() reports them back in
// increasing order.
TEST(SmallBitSetTest, FromIndices) {
  BitSet v = BitSet::FromIndices({1, 3, 10});
  EXPECT_FALSE(v.none());
  EXPECT_EQ(3, v.count());
  EXPECT_EQ((static_cast<uint32_t>(1) << 1) | (static_cast<uint32_t>(1) << 3) |
                (static_cast<uint32_t>(1) << 10),
            v.to_uint());
  EXPECT_THAT(v.index_view(), ::testing::ElementsAre(1, 3, 10));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/small_bit_set.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/small_bit_set_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7ea580bf-fc22-4f02-84c7-1813a00b67ee | cpp | google/tensorstore | rational | tensorstore/internal/json_binding/rational.h | tensorstore/internal/json_binding/rational_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_RATIONAL_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_RATIONAL_H_
#include <stddef.h>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/util/rational.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_binding {
namespace rational_binder {
// JSON binder for `Rational<T>`.  When loading, accepts three forms:
//   - a two-element array `[numerator, denominator]`,
//   - a string `"a/b"` or `"a"` (integers parsed with absl::SimpleAtoi),
//   - a plain number (delegated to the default binder for T).
// When saving, emits a plain number if the denominator is 1, otherwise the
// string `"a/b"`.
struct RationalBinder {
  // Loading overload (is_loading == std::true_type).
  template <typename Options, typename T>
  absl::Status operator()(std::true_type is_loading, const Options& options,
                          Rational<T>* obj, ::nlohmann::json* j) const {
    if (j->is_array()) {
      T values[2];
      span<T, 2> values_span(values);
      TENSORSTORE_RETURN_IF_ERROR(
          FixedSizeArray()(is_loading, options, &values_span, j));
      *obj = Rational<T>(values[0], values[1]);
      return absl::OkStatus();
    } else if (auto* s = j->get_ptr<const std::string*>()) {
      std::string_view sv = *s;
      size_t slash_index = sv.find('/');
      T numerator;
      T denominator;
      if (slash_index == std::string_view::npos) {
        // No '/': the whole string must parse as an integer numerator.
        denominator = 1;
        if (!absl::SimpleAtoi(sv, &numerator)) {
          return internal_json::ExpectedError(
              *j, "number or rational number `a/b`");
        }
      } else {
        if (!absl::SimpleAtoi(sv.substr(0, slash_index), &numerator) ||
            !absl::SimpleAtoi(sv.substr(slash_index + 1), &denominator)) {
          return internal_json::ExpectedError(*j, "rational number `a/b`");
        }
      }
      *obj = Rational<T>(numerator, denominator);
      return absl::OkStatus();
    }
    // Neither array nor string: fall back to the default binder, which
    // handles plain JSON numbers (and produces the error otherwise).
    T value;
    TENSORSTORE_RETURN_IF_ERROR(
        DefaultBinder<>(is_loading, options, &value, j));
    *obj = value;
    return absl::OkStatus();
  }
  // Saving overload (is_loading == std::false_type).
  template <typename Options, typename T>
  absl::Status operator()(std::false_type is_loading, const Options& options,
                          const Rational<T>* obj, ::nlohmann::json* j) const {
    if (obj->denominator() == static_cast<T>(1)) {
      T num = obj->numerator();
      return DefaultBinder<>(is_loading, options, &num, j);
    }
    *j = absl::StrFormat("%d/%d", obj->numerator(), obj->denominator());
    return absl::OkStatus();
  }
};
}
// Registers `RationalBinder` as the default JSON binder for `Rational<T>`.
template <typename T>
constexpr inline auto DefaultBinder<Rational<T>> =
    rational_binder::RationalBinder{};
}
}
#endif | #include "tensorstore/internal/json_binding/rational.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/index.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/rational.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Rational;
namespace {
// Round-trip and error-path coverage for the Rational<Index> JSON binder.
TEST(JsonBindingTest, Simple) {
  // Value <-> JSON round trips: "a/b" string form, and plain numbers when
  // the denominator is 1.
  tensorstore::TestJsonBinderRoundTrip<Rational<Index>>({
      {{2, 3}, "2/3"},
      {2, 2},
      {1, 1},
      {0, 0},
  });
  // Strings with zero denominators survive a JSON-only round trip.
  tensorstore::TestJsonBinderRoundTripJsonOnly<Rational<Index>>({
      "2/0",
      "3/0",
      "0/0",
  });
  tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Rational<Index>>({
      {{2, 3}, "2/3"},
  });
  // Malformed strings and arrays produce kInvalidArgument with the exact
  // messages emitted by RationalBinder / FixedSizeArray.
  tensorstore::TestJsonBinderFromJson<Rational<Index>>({
      {"abc",
       MatchesStatus(
           absl::StatusCode::kInvalidArgument,
           "Expected number or rational number `a/b`, but received: \"abc\"")},
      {"12a",
       MatchesStatus(
           absl::StatusCode::kInvalidArgument,
           "Expected number or rational number `a/b`, but received: \"12a\"")},
      {"12/a",
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Expected rational number `a/b`, but received: \"12/a\"")},
      {{1},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Array has length 1 but should have length 2")},
      {{1, "a"},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Error parsing value at position 1: "
                     "Expected 64-bit signed integer, but received: \"a\"")},
      {{1, 2, 3},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Array has length 3 but should have length 2")},
  });
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/rational.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/rational_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b9327b78-625c-4f69-a147-65f529cff80c | cpp | google/tensorstore | span_json | tensorstore/util/span_json.h | tensorstore/util/span_json_test.cc | #ifndef TENSORSTORE_UTIL_SPAN_JSON_H_
#define TENSORSTORE_UTIL_SPAN_JSON_H_
#include <cstddef>
#include <nlohmann/json.hpp>
#include "tensorstore/util/span.h"
namespace tensorstore {
/// ADL conversion hook used by `::nlohmann::json`: serializes a `span` as a
/// JSON array by copying its elements.
template <typename T, ptrdiff_t Extent>
void to_json(::nlohmann::json& out,
             tensorstore::span<T, Extent> s) {
  out = ::nlohmann::json::array_t(s.begin(), s.end());
}
#endif | #include "tensorstore/util/span_json.h"
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::span;
TEST(SpanJsonTest, Basic) {
EXPECT_EQ(::nlohmann::json({1, 2, 3}),
::nlohmann::json(span<const int, 3>({1, 2, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/span_json.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/span_json_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1cafbf81-77c4-417f-a640-ffaa19edce43 | cpp | google/tensorstore | element_traits | tensorstore/util/element_traits.h | tensorstore/util/element_traits_test.cc | #ifndef TENSORSTORE_UTIL_ELEMENT_TRAITS_H_
#define TENSORSTORE_UTIL_ELEMENT_TRAITS_H_
#include <type_traits>
namespace tensorstore {
/// `true` if a pointer to `Source` may be implicitly treated as a pointer
/// to `Dest`: const may be added but never removed, and the element type
/// must either match (modulo const) or be erased from non-void to void.
template <typename Source, typename Dest>
constexpr inline bool IsElementTypeImplicitlyConvertible =
    (!std::is_const_v<Source> || std::is_const_v<Dest>) &&
    (std::is_same_v<const Source, const Dest> ||
     (!std::is_void_v<Source> && std::is_void_v<Dest>));

/// `true` if the conversion requires an explicit cast: recovering a typed
/// element from `void` (const-correctness still required).
template <typename Source, typename Dest>
constexpr inline bool IsElementTypeOnlyExplicitlyConvertible =
    (std::is_void_v<Source> && !std::is_void_v<Dest>) &&
    (!std::is_const_v<Source> || std::is_const_v<Dest>);

/// `true` if the conversion is allowed at all (implicitly or explicitly):
/// const-correct, and either side is void or the types match modulo const.
template <typename Source, typename Dest>
constexpr inline bool IsElementTypeExplicitlyConvertible =
    (!std::is_const_v<Source> || std::is_const_v<Dest>) &&
    (std::is_void_v<Source> || std::is_void_v<Dest> ||
     std::is_same_v<const Source, const Dest>);

/// Symmetric compatibility: either side is void, or the types match modulo
/// const.
template <typename A, typename B>
constexpr inline bool AreElementTypesCompatible =
    (std::is_void_v<A> || std::is_void_v<B> ||
     std::is_same_v<const A, const B>);
}
#endif | #include "tensorstore/util/element_traits.h"
#include <type_traits>
namespace {
using ::tensorstore::AreElementTypesCompatible;
using ::tensorstore::IsElementTypeImplicitlyConvertible;
using ::tensorstore::IsElementTypeOnlyExplicitlyConvertible;
// IsElementTypeImplicitlyConvertible: const may be added but not removed;
// the element type must match (modulo const) or be erased to void.
// (Exact-duplicate assertions from the previous revision were deduplicated.)
static_assert(IsElementTypeImplicitlyConvertible<int, int>);
static_assert(IsElementTypeImplicitlyConvertible<const int, const int>);
static_assert(IsElementTypeImplicitlyConvertible<int, const int>);
static_assert(!IsElementTypeImplicitlyConvertible<const int, int>);
static_assert(!IsElementTypeImplicitlyConvertible<int, float>);
static_assert(!IsElementTypeImplicitlyConvertible<const int, const float>);
static_assert(IsElementTypeImplicitlyConvertible<int, void>);
static_assert(IsElementTypeImplicitlyConvertible<int, const void>);
static_assert(IsElementTypeImplicitlyConvertible<const int, const void>);
static_assert(!IsElementTypeImplicitlyConvertible<const int, void>);
// IsElementTypeOnlyExplicitlyConvertible: only void -> non-void requires a
// cast, and const still may not be dropped.
static_assert(!IsElementTypeOnlyExplicitlyConvertible<int, int>);
static_assert(!IsElementTypeOnlyExplicitlyConvertible<int, void>);
static_assert(IsElementTypeOnlyExplicitlyConvertible<void, int>);
static_assert(IsElementTypeOnlyExplicitlyConvertible<void, const int>);
static_assert(IsElementTypeOnlyExplicitlyConvertible<const void, const int>);
static_assert(!IsElementTypeOnlyExplicitlyConvertible<const void, int>);
// AreElementTypesCompatible: symmetric; void is compatible with everything,
// otherwise the types must match modulo const.
static_assert(AreElementTypesCompatible<int, int>);
static_assert(AreElementTypesCompatible<const int, int>);
static_assert(AreElementTypesCompatible<int, const int>);
static_assert(AreElementTypesCompatible<const int, const int>);
static_assert(AreElementTypesCompatible<const int, void>);
static_assert(AreElementTypesCompatible<const int, const void>);
static_assert(AreElementTypesCompatible<int, void>);
static_assert(AreElementTypesCompatible<int, const void>);
static_assert(AreElementTypesCompatible<void, const int>);
static_assert(AreElementTypesCompatible<const void, const int>);
static_assert(AreElementTypesCompatible<void, int>);
static_assert(AreElementTypesCompatible<const void, int>);
static_assert(!AreElementTypesCompatible<int, float>);
static_assert(!AreElementTypesCompatible<const int, float>);
static_assert(!AreElementTypesCompatible<int, const float>);
static_assert(!AreElementTypesCompatible<const int, const float>);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/element_traits.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/element_traits_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b33caddb-e189-4706-ad85-019b3a5d26d2 | cpp | google/tensorstore | iterate_over_index_range | tensorstore/util/iterate_over_index_range.h | tensorstore/util/iterate_over_index_range_test.cc | #ifndef TENSORSTORE_UTIL_ITERATE_OVER_INDEX_RANGE_H_
#define TENSORSTORE_UTIL_ITERATE_OVER_INDEX_RANGE_H_
#include <cassert>
#include <type_traits>
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/void_wrapper.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_iterate {
// Maps loop-nesting depth `outer_dims` to the dimension it iterates: in C
// order the outermost loop walks dimension 0; in Fortran order it walks the
// last dimension.
inline constexpr DimensionIndex GetLoopDimension(ContiguousLayoutOrder order,
                                                 DimensionIndex outer_dims,
                                                 DimensionIndex total_dims) {
  if (order == ContiguousLayoutOrder::c) {
    return outer_dims;
  }
  return total_dims - 1 - outer_dims;
}
// Result type obtained by invoking `Func` with a vector of `Rank` indices
// (after decaying references/cv-qualifiers).
template <typename Func, typename IndexType, DimensionIndex Rank>
using IterateOverIndexRangeResult = std::decay_t<
    std::invoke_result_t<Func, tensorstore::span<const IndexType, Rank>>>;
// Implements the recursive loop nest behind `IterateOverIndexRange`.
//
// `Func` receives a `span<const IndexType, Rank>` of indices.  If `Func`
// returns a result convertible to `bool`, a false-like result stops the
// iteration early and becomes the overall result.
template <ContiguousLayoutOrder Order, typename Func, typename IndexType,
          DimensionIndex Rank>
struct IterateOverIndexRangeHelper {
  using IndicesSpan = tensorstore::span<const IndexType, Rank>;
  using ResultType = IterateOverIndexRangeResult<Func, IndexType, Rank>;
  // `void` results are wrapped so they can be tested with `!result`
  // uniformly alongside `bool` results.
  using WrappedResultType = internal::Void::WrappedType<ResultType>;
  // Runs the loop at nesting depth `outer_dims`, writing the current
  // coordinate into `indices[cur_dim]` and recursing (or invoking `func` at
  // the innermost level).
  static WrappedResultType LoopImpl(
      Func func, DimensionIndex outer_dims, const IndexType* origin,
      const IndexType* shape, tensorstore::span<IndexType, Rank> indices) {
    WrappedResultType result =
        internal::DefaultIterationResult<WrappedResultType>::value();
    // `Order` decides whether depth maps to the first or last dimension.
    const DimensionIndex cur_dim =
        GetLoopDimension(Order, outer_dims, indices.size());
    const IndexType start = origin[cur_dim];
    const IndexType stop = shape[cur_dim] + start;
    if (outer_dims + 1 == indices.size()) {
      // Innermost dimension: invoke `func` once per index vector.
      for (IndexType i = start; i < stop; ++i) {
        indices[cur_dim] = i;
        result = internal::Void::CallAndWrap(func, IndicesSpan(indices));
        if (!result) break;
      }
    } else {
      for (IndexType i = start; i < stop; ++i) {
        indices[cur_dim] = i;
        result = LoopImpl(func, outer_dims + 1, origin, shape, indices);
        if (!result) break;
      }
    }
    return result;
  }
  // Entry point: allocates the index buffer and starts the recursion.
  static ResultType Start(Func func, const IndexType* origin,
                          IndicesSpan shape) {
    if (shape.size() == 0) {
      // Rank 0: a single invocation with an empty index vector.
      return func(tensorstore::span<const IndexType, Rank>());
    }
    assert(shape.size() <= kMaxRank);
    IndexType indices[kMaxRank];
    return internal::Void::Unwrap(LoopImpl(
        func, 0, &origin[0], &shape[0],
        tensorstore::span<IndexType, Rank>(&indices[0], shape.size())));
  }
};
}
// Invokes `func` with every index vector in the hyperrectangle
// `[origin, origin + shape)`, in the layout order given by `Order`.
// If `func` returns `bool`, a `false` return stops iteration early and is
// propagated as the overall result.
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
          typename IndexType, DimensionIndex Rank, typename Func>
internal_iterate::IterateOverIndexRangeResult<
    Func, std::remove_const_t<IndexType>, Rank>
IterateOverIndexRange(tensorstore::span<IndexType, Rank> origin,
                      tensorstore::span<IndexType, Rank> shape, Func&& func) {
  assert(origin.size() == shape.size());
  return internal_iterate::IterateOverIndexRangeHelper<
      Order, Func, std::remove_const_t<IndexType>, Rank>::Start(func,
                                                                origin.data(),
                                                                shape);
}
// Invokes `func` with every index vector contained in `box`, as
// `span<const Index, BoxType::static_rank>`.
//
// The iteration order may be chosen via the `Order` template parameter or
// the `order` runtime parameter.  Previously the runtime `order` argument
// was accepted but silently ignored in favor of `Order`; it is now honored,
// and defaults to `Order` so existing callers keep their behavior.
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
          typename BoxType, typename Func>
std::enable_if_t<IsBoxLike<BoxType>,
                 internal_iterate::IterateOverIndexRangeResult<
                     Func, Index, BoxType::static_rank>>
IterateOverIndexRange(const BoxType& box, Func&& func,
                      ContiguousLayoutOrder order = Order) {
  if (order == ContiguousLayoutOrder::c) {
    return internal_iterate::IterateOverIndexRangeHelper<
        ContiguousLayoutOrder::c, Func, Index,
        BoxType::static_rank>::Start(func, box.origin().data(), box.shape());
  }
  return internal_iterate::IterateOverIndexRangeHelper<
      ContiguousLayoutOrder::fortran, Func, Index,
      BoxType::static_rank>::Start(func, box.origin().data(), box.shape());
}
// Zero-origin overload: invokes `func` for every index vector in
// `[0, shape)`, in the layout order given by `Order`.
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
          typename IndexType, DimensionIndex Rank, typename Func>
internal_iterate::IterateOverIndexRangeResult<
    Func, std::remove_const_t<IndexType>, Rank>
IterateOverIndexRange(tensorstore::span<IndexType, Rank> shape, Func&& func) {
  using NonConstIndex = std::remove_const_t<IndexType>;
  const auto zero_origin =
      GetConstantVector<NonConstIndex, 0>(GetStaticOrDynamicExtent(shape));
  return internal_iterate::IterateOverIndexRangeHelper<
      Order, Func, NonConstIndex, Rank>::Start(func, zero_origin.data(),
                                               shape);
}
}
#endif | #include "tensorstore/util/iterate_over_index_range.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::Index;
using ::tensorstore::IterateOverIndexRange;
using ::tensorstore::span;
// In C order the last dimension varies fastest.
TEST(IterateOverIndexRange, COrder) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange<ContiguousLayoutOrder::c>(
      span({2, 3}), [&](span<const int, 2> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
      });
  const std::vector<std::vector<int>> expected{{0, 0}, {0, 1}, {0, 2},
                                               {1, 0}, {1, 1}, {1, 2}};
  EXPECT_EQ(expected, visited);
}
// In Fortran order the first dimension varies fastest.
TEST(IterateOverIndexRange, FortranOrder) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
      span({2, 3}), [&](span<const int, 2> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
      });
  const std::vector<std::vector<int>> expected{{0, 0}, {1, 0}, {0, 1},
                                               {1, 1}, {0, 2}, {1, 2}};
  EXPECT_EQ(expected, visited);
}
// A non-zero origin offsets the visited coordinates.
TEST(IterateOverIndexRange, COrderWithOrigin) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange<ContiguousLayoutOrder::c>(
      span({0, 1}), span({2, 2}), [&](span<const int, 2> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
      });
  const std::vector<std::vector<int>> expected{{0, 1}, {0, 2}, {1, 1}, {1, 2}};
  EXPECT_EQ(expected, visited);
}
// Origin offset combined with Fortran iteration order.
TEST(IterateOverIndexRange, FortranOrderWithOrigin) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
      span({0, 1}), span({2, 2}), [&](span<const int, 2> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
      });
  const std::vector<std::vector<int>> expected{{0, 1}, {1, 1}, {0, 2}, {1, 2}};
  EXPECT_EQ(expected, visited);
}
// The BoxView overload, with the iteration order passed at runtime.
TEST(IterateOverIndexRange, COrderWithBox) {
  std::vector<std::vector<Index>> visited;
  IterateOverIndexRange(
      tensorstore::BoxView({0, 1}, {2, 2}),
      [&](span<const Index, 2> indices) {
        visited.push_back(std::vector<Index>(indices.begin(), indices.end()));
      },
      ContiguousLayoutOrder::c);
  const std::vector<std::vector<Index>> expected{
      {0, 1}, {0, 2}, {1, 1}, {1, 2}};
  EXPECT_EQ(expected, visited);
}
// A rank-0 range still invokes the callback exactly once, with an empty
// index vector.
TEST(IterateOverIndexRange, RankZero) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
      span<const int, 0>(), [&](span<const int, 0> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
      });
  const std::vector<std::vector<int>> expected{std::vector<int>{}};
  EXPECT_EQ(expected, visited);
}
// Returning false from the callback stops iteration early and becomes the
// overall result.
TEST(IterateOverIndexRange, Stop) {
  std::vector<std::vector<int>> visited;
  const bool completed = IterateOverIndexRange<ContiguousLayoutOrder::c>(
      span({2, 3}), [&](span<const int, 2> indices) {
        visited.push_back(std::vector<int>(indices.begin(), indices.end()));
        return indices[1] != 1;
      });
  EXPECT_FALSE(completed);
  const std::vector<std::vector<int>> expected{{0, 0}, {0, 1}};
  EXPECT_EQ(expected, visited);
}
// With an empty range the callback never runs, so a bool-returning callback
// yields the default success value.
TEST(IterateOverIndexRange, ZeroElementsBoolReturn) {
  const bool completed = IterateOverIndexRange<ContiguousLayoutOrder::c>(
      span({0}), [&](span<const int, 1> x) { return false; });
  EXPECT_TRUE(completed);
}
// Statically rank-0 span: one invocation with empty indices.
TEST(IterateOverIndexRange, StaticRankZero) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange(span<const int, 0>{}, [&](span<const int, 0> indices) {
    visited.push_back(std::vector<int>(indices.begin(), indices.end()));
  });
  const std::vector<std::vector<int>> expected{std::vector<int>{}};
  EXPECT_EQ(expected, visited);
}
// Dynamically rank-0 span: one invocation with empty indices.
TEST(IterateOverIndexRange, DynamicRankZero) {
  std::vector<std::vector<int>> visited;
  IterateOverIndexRange(span<const int>(nullptr, 0),
                        [&](span<const int> indices) {
                          visited.push_back(
                              std::vector<int>(indices.begin(), indices.end()));
                        });
  const std::vector<std::vector<int>> expected{std::vector<int>{}};
  EXPECT_EQ(expected, visited);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/iterate_over_index_range.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/iterate_over_index_range_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d35cffbe-967b-4c8d-96bd-dc614f18ca06 | cpp | google/tensorstore | float8 | tensorstore/util/float8.h | tensorstore/util/float8_test.cc | #ifndef TENSORSTORE_UTIL_FLOAT8_H_
#define TENSORSTORE_UTIL_FLOAT8_H_
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <ostream>
#include <type_traits>
#include <utility>
#include "absl/base/casts.h"
#include <half.hpp>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/util/bfloat16.h"
#if (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
#include <bit>
#endif
namespace tensorstore {
namespace float8_internal {
class Float8e4m3fn;
class Float8e4m3fnuz;
class Float8e4m3b11fnuz;
class Float8e5m2;
class Float8e5m2fnuz;
// CRTP base providing the construction, conversion, arithmetic, and
// comparison operations shared by all 8-bit float types.  `Derived` fixes
// the bit layout; per-format behavior comes from the
// std::numeric_limits<Derived> specialization and the abs()/isnan()
// overloads defined later in this file.
template <typename Derived>
class Float8Base {
 protected:
  // Tag type so the raw bit-pattern constructor cannot be selected by
  // accident from a numeric argument.
  struct ConstructFromRepTag {};
  constexpr Float8Base(uint8_t rep, ConstructFromRepTag) : rep_{rep} {}
 public:
  constexpr Float8Base() : rep_(0) {}
  // Conversion from arbitrary arithmetic types funnels through float.
  template <typename T,
            typename EnableIf = std::enable_if<std::is_arithmetic_v<T>>>
  explicit Float8Base(T f)
      : Float8Base(ConvertFrom(static_cast<float>(f)).rep(),
                   ConstructFromRepTag{}) {}
  explicit Float8Base(double f64)
      : Float8Base(ConvertFrom(f64).rep(), ConstructFromRepTag{}) {}
  explicit Float8Base(float f32)
      : Float8Base(ConvertFrom(f32).rep(), ConstructFromRepTag{}) {}
  explicit Float8Base(BFloat16 bf16)
      : Float8Base(ConvertFrom(bf16).rep(), ConstructFromRepTag{}) {}
  explicit Float8Base(::half_float::half f16)
      : Float8Base(ConvertFrom(f16).rep(), ConstructFromRepTag{}) {}
  // Raw 8-bit pattern of this value.
  constexpr uint8_t rep() const { return rep_; }
  // Conversions to arithmetic types also funnel through float.
  template <typename T,
            typename EnableIf = std::enable_if<std::is_arithmetic_v<T>>>
  explicit operator T() const {
    return static_cast<T>(static_cast<float>(derived()));
  }
  explicit operator double() const { return ConvertTo<double>(derived()); }
  explicit operator float() const { return ConvertTo<float>(derived()); }
  explicit operator BFloat16() const { return ConvertTo<BFloat16>(derived()); }
  explicit operator ::half_float::half() const {
    return ConvertTo<::half_float::half>(derived());
  }
  // True for any value other than +/-0.  NOTE(review): the fnuz subclasses
  // override this, since for them 0x80 encodes NaN rather than -0.
  explicit operator bool() const { return (rep() & 0x7F) != 0; }
  // Negation flips the sign bit; fnuz subclasses override to keep zero and
  // NaN representations unchanged.
  constexpr Derived operator-() const {
    return Derived(static_cast<uint8_t>(rep() ^ 0x80), ConstructFromRepTag{});
  }
  constexpr const Derived& derived() const {
    return *static_cast<const Derived*>(this);
  }
  constexpr Derived& derived() { return *static_cast<Derived*>(this); }
  // Constructs a value directly from its bit pattern.
  static constexpr Derived FromRep(uint8_t rep) {
    return Derived(rep, ConstructFromRepTag{});
  }
  // Bit-level conversions; definitions appear later in the file.
  template <bool kSaturate = false, bool kTruncate = false, typename From>
  static Derived ConvertFrom(const From& from);
  template <typename To, bool kSaturate = false, bool kTruncate = false>
  static To ConvertTo(const Derived& from);
  // Arithmetic is performed in float and converted back to 8 bits.
  Derived operator+(const Derived& other) const {
    return Derived{float{derived()} + float{other}};
  }
  Derived operator-(const Derived& other) const {
    return Derived{float{derived()} - float{other}};
  }
  Derived operator*(const Derived& other) const {
    return Derived{float{derived()} * float{other}};
  }
  Derived operator/(const Derived& other) const {
    return Derived{float{derived()} / float{other}};
  }
  // Comparisons use the total-order helper Compare() below; NaN compares
  // unordered (kUnordered), so all of these return false for NaN operands
  // except operator!=.
  constexpr bool operator==(const Derived& other) const {
    return Compare(derived(), other) == Ordering::kEquivalent;
  }
  constexpr bool operator!=(const Derived& other) const {
    return Compare(derived(), other) != Ordering::kEquivalent;
  }
  bool operator<(const Derived& other) const {
    return Compare(derived(), other) == Ordering::kLess;
  }
  bool operator<=(const Derived& other) const {
    return Compare(derived(), other) <= Ordering::kEquivalent;
  }
  bool operator>(const Derived& other) const {
    return Compare(derived(), other) == Ordering::kGreater;
  }
  bool operator>=(const Derived& other) const {
    Ordering ordering = Compare(derived(), other);
    return ordering == Ordering::kGreater || ordering == Ordering::kEquivalent;
  }
  Derived& operator+=(const Derived& other) {
    derived() = derived() + other;
    return derived();
  }
  // NOTE(review): non-member `float += Derived` returns the sum without
  // modifying `a` (the lhs is const) — confirm this is intentional.
  friend float operator+=(const float& a, Derived b) {
    return a + static_cast<float>(b);
  }
  Derived& operator-=(const Derived& other) {
    derived() = derived() - other;
    return derived();
  }
  Derived& operator*=(const Derived& other) {
    derived() = derived() * other;
    return derived();
  }
  Derived& operator/=(const Derived& other) {
    derived() = derived() / other;
    return derived();
  }
  // nlohmann::json serialization: emits the value as the json float type.
  template <template <typename U, typename V, typename... Args>
            class ObjectType ,
            template <typename U, typename... Args>
            class ArrayType ,
            class StringType , class BooleanType ,
            class NumberIntegerType ,
            class NumberUnsignedType ,
            class NumberFloatType ,
            template <typename U> class AllocatorType ,
            template <typename T, typename SFINAE = void>
            class JSONSerializer ,
            class BinaryType >
  friend void to_json(
      ::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
                             NumberIntegerType, NumberUnsignedType,
                             NumberFloatType, AllocatorType, JSONSerializer,
                             BinaryType>& j,
      Derived v) {
    j = static_cast<NumberFloatType>(v);
  }
 private:
  // Splits `x` into its sign bit (as a full byte mask position) and its
  // magnitude bits, using the format-specific abs() overload.
  static std::pair<uint8_t, uint8_t> SignAndMagnitude(Derived x) {
    const uint8_t x_abs_bits = absl::bit_cast<uint8_t>(abs(x));
    const uint8_t x_bits = absl::bit_cast<uint8_t>(x);
    const uint8_t x_sign = x_bits ^ x_abs_bits;
    return {x_sign, x_abs_bits};
  }
  // Maps a sign/magnitude pair onto a monotonically ordered signed integer
  // (XOR with all-ones when the sign bit is set), so values can be compared
  // with plain integer comparison.
  static int8_t SignAndMagnitudeToTwosComplement(uint8_t sign,
                                                 uint8_t magnitude) {
    return magnitude ^ (static_cast<int8_t>(sign) < 0 ? -1 : 0);
  }
  enum Ordering : int8_t {
    kLess = -1,
    kEquivalent = 0,
    kGreater = 1,
    kUnordered = 2,
  };
  // IEEE-style comparison: NaN is unordered; +0 and -0 compare equivalent;
  // otherwise order by the sign/magnitude mapping above.
  friend Ordering Compare(const Derived& lhs, const Derived& rhs) {
    if (isnan(lhs) || isnan(rhs)) {
      return Ordering::kUnordered;
    }
    auto [lhs_sign, lhs_mag] = SignAndMagnitude(lhs);
    auto [rhs_sign, rhs_mag] = SignAndMagnitude(rhs);
    if (lhs_mag == 0 && rhs_mag == 0) {
      return Ordering::kEquivalent;
    }
    int8_t lhs_twos_complement =
        SignAndMagnitudeToTwosComplement(lhs_sign, lhs_mag);
    int8_t rhs_twos_complement =
        SignAndMagnitudeToTwosComplement(rhs_sign, rhs_mag);
    if (lhs_twos_complement < rhs_twos_complement) {
      return Ordering::kLess;
    }
    if (lhs_twos_complement > rhs_twos_complement) {
      return Ordering::kGreater;
    }
    return Ordering::kEquivalent;
  }
  uint8_t rep_;
};
// 8-bit float with 4 exponent bits and 3 mantissa bits; "fn" = finite with
// NaN (no infinity).  Layout details live in numeric_limits_float8_e4m3fn.
class Float8e4m3fn : public Float8Base<Float8e4m3fn> {
 private:
  using Base = Float8Base<Float8e4m3fn>;
  friend class Float8Base<Float8e4m3fn>;
  using Base::Float8Base;
 public:
  // Conversions from the other 8-bit formats are explicit.
  explicit Float8e4m3fn(const Float8e5m2& f8) : Float8e4m3fn(ConvertFrom(f8)) {}
  explicit Float8e4m3fn(const Float8e4m3b11fnuz& f8)
      : Float8e4m3fn(ConvertFrom(f8)) {}
};
// E4M3 variant with exponent bias 11; "fnuz" = finite, NaN, unsigned zero:
// 0x80 encodes NaN and there is no -0.
class Float8e4m3b11fnuz : public Float8Base<Float8e4m3b11fnuz> {
 private:
  using Base = Float8Base<Float8e4m3b11fnuz>;
  friend class Float8Base<Float8e4m3b11fnuz>;
  using Base::Float8Base;
 public:
  explicit Float8e4m3b11fnuz(const Float8e5m2& f8)
      : Float8e4m3b11fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3b11fnuz(const Float8e5m2fnuz& f8)
      : Float8e4m3b11fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3b11fnuz(const Float8e4m3fn& f8)
      : Float8e4m3b11fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3b11fnuz(const Float8e4m3fnuz& f8)
      : Float8e4m3b11fnuz(ConvertFrom(f8)) {}
  // Negating zero (0x00) or NaN (0x80) returns the value unchanged: the
  // format has no -0, and flipping NaN's sign bit would produce zero.
  constexpr Float8e4m3b11fnuz operator-() const {
    if ((rep() & 0x7f) == 0x00) {
      return *this;
    }
    return Base::operator-();
  }
  // Re-expose binary operator-, which is hidden by the unary overload above.
  Float8e4m3b11fnuz operator-(const Float8e4m3b11fnuz& other) const {
    return Base::operator-(other);
  }
  // Unlike the base class, any nonzero rep (including NaN, 0x80) is truthy.
  explicit operator bool() const { return rep() != 0; }
};
// E4M3 "fnuz" format: finite, NaN at 0x80, no -0 (exponent bias 8, see
// Traits<Float8e4m3fnuz>).
class Float8e4m3fnuz : public Float8Base<Float8e4m3fnuz> {
 private:
  using Base = Float8Base<Float8e4m3fnuz>;
  friend class Float8Base<Float8e4m3fnuz>;
  using Base::Float8Base;
 public:
  explicit Float8e4m3fnuz(const Float8e5m2& f8)
      : Float8e4m3fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3fnuz(const Float8e5m2fnuz& f8)
      : Float8e4m3fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3fnuz(const Float8e4m3b11fnuz& f8)
      : Float8e4m3fnuz(ConvertFrom(f8)) {}
  explicit Float8e4m3fnuz(const Float8e4m3fn& f8)
      : Float8e4m3fnuz(ConvertFrom(f8)) {}
  // Negating zero (0x00) or NaN (0x80) returns the value unchanged.
  constexpr Float8e4m3fnuz operator-() const {
    if ((rep() & 0x7f) == 0x00) {
      return *this;
    }
    return Base::operator-();
  }
  // Re-expose binary operator-, hidden by the unary overload above.
  Float8e4m3fnuz operator-(const Float8e4m3fnuz& other) const {
    return Base::operator-(other);
  }
  // Any nonzero rep (including NaN, 0x80) is truthy.
  explicit operator bool() const { return rep() != 0; }
};
// 8-bit float with 5 exponent bits and 2 mantissa bits; IEEE-style, with
// real infinities and NaNs (see numeric_limits_float8_e5m2).
class Float8e5m2 : public Float8Base<Float8e5m2> {
 private:
  using Base = Float8Base<Float8e5m2>;
  friend class Float8Base<Float8e5m2>;
  using Base::Float8Base;
 public:
  // Conversions from the other 8-bit formats are explicit.
  explicit Float8e5m2(Float8e4m3fn f8) : Float8e5m2(ConvertFrom(f8)) {}
  explicit Float8e5m2(Float8e4m3fnuz f8) : Float8e5m2(ConvertFrom(f8)) {}
  explicit Float8e5m2(Float8e4m3b11fnuz f8) : Float8e5m2(ConvertFrom(f8)) {}
  // Fixed: previously took a non-const `Float8e5m2fnuz&`, which rejected
  // const and temporary arguments and was inconsistent with the sibling
  // converting constructors.
  explicit Float8e5m2(const Float8e5m2fnuz& f8) : Float8e5m2(ConvertFrom(f8)) {}
};
// E5M2 "fnuz" format: finite, NaN at 0x80, no -0 (exponent bias 16, see
// Traits<Float8e5m2fnuz>).
class Float8e5m2fnuz : public Float8Base<Float8e5m2fnuz> {
 private:
  using Base = Float8Base<Float8e5m2fnuz>;
  friend class Float8Base<Float8e5m2fnuz>;
  using Base::Float8Base;
 public:
  explicit Float8e5m2fnuz(const Float8e5m2& f8)
      : Float8e5m2fnuz(ConvertFrom(f8)) {}
  explicit Float8e5m2fnuz(const Float8e4m3b11fnuz& f8)
      : Float8e5m2fnuz(ConvertFrom(f8)) {}
  explicit Float8e5m2fnuz(const Float8e4m3fn& f8)
      : Float8e5m2fnuz(ConvertFrom(f8)) {}
  explicit Float8e5m2fnuz(const Float8e4m3fnuz& f8)
      : Float8e5m2fnuz(ConvertFrom(f8)) {}
  // Negating zero (0x00) or NaN (0x80) returns the value unchanged.
  constexpr Float8e5m2fnuz operator-() const {
    if ((rep() & 0x7f) == 0x00) {
      return *this;
    }
    return Base::operator-();
  }
  // Re-expose binary operator-, hidden by the unary overload above.
  Float8e5m2fnuz operator-(const Float8e5m2fnuz& other) const {
    return Base::operator-(other);
  }
  // Any nonzero rep (including NaN, 0x80) is truthy.
  explicit operator bool() const { return rep() != 0; }
};
// Constant-expression substitutes for std::abs/std::ceil/std::floor, which
// are not usable in constant expressions in C++17.
constexpr double ConstexprAbs(double x) {
  if (x < 0.0) {
    return -x;
  }
  return x;
}
constexpr double ConstexprCeil(double x) {
  // Doubles with magnitude >= 2^52 are already integral (or NaN/inf) and
  // would overflow the int64_t truncation below.
  constexpr double kIntegerThreshold =
      uint64_t{1} << (std::numeric_limits<double>::digits - 1);
  if (!(ConstexprAbs(x) < kIntegerThreshold)) {
    return x;
  }
  const double truncated = static_cast<double>(static_cast<int64_t>(x));
  if (truncated < x) {
    return truncated + 1.0;
  }
  return truncated;
}
constexpr double ConstexprFloor(double x) { return -ConstexprCeil(-x); }
// log10(2), used to derive decimal digit/exponent counts from binary ones.
constexpr double kLog10Of2 = 0.3010299956639812;
// numeric_limits::digits10 derived from `digits` binary digits.
constexpr int Digits10FromDigits(int digits) {
  return static_cast<int>(ConstexprFloor((digits - 1) * kLog10Of2));
}
// numeric_limits::max_digits10 derived from `digits` binary digits.
constexpr int MaxDigits10FromDigits(int digits) {
  return static_cast<int>(ConstexprCeil(1.0 + (digits * kLog10Of2)));
}
// numeric_limits::min_exponent10 derived from min_exponent.
constexpr int MinExponent10FromMinExponent(int min_exponent) {
  return static_cast<int>(ConstexprCeil((min_exponent - 1) * kLog10Of2));
}
// numeric_limits::max_exponent10 derived from max_exponent and the mantissa
// width (only digits == 3 or 4 are supported by the table below).
constexpr int MaxExponent10FromMaxExponentAndDigits(int max_exponent,
                                                    int digits) {
  // log10 of the largest-representable fraction just below 1.0 for each
  // supported mantissa width (index 0 -> digits == 3, index 1 -> digits == 4).
  constexpr double kLog10OfOnePredecessor[] = {
      -0.057991946977686754,
      -0.028028723600243537,
  };
  return static_cast<int>(ConstexprFloor(kLog10OfOnePredecessor[digits - 3] +
                                         max_exponent * kLog10Of2));
}
// numeric_limits members shared by every 8-bit float format; the per-format
// structs below add the layout-dependent members.
struct numeric_limits_float8_base {
  static inline constexpr const bool is_specialized = true;
  static inline constexpr const bool is_signed = true;
  static inline constexpr const bool is_integer = false;
  static inline constexpr const bool is_exact = false;
  static inline constexpr const bool has_quiet_NaN = true;
  static inline constexpr const std::float_denorm_style has_denorm =
      std::denorm_present;
  static inline constexpr const bool has_denorm_loss = false;
  static inline constexpr const std::float_round_style round_style =
      std::round_to_nearest;
  static inline constexpr const bool is_bounded = true;
  static inline constexpr const bool is_modulo = false;
  // radix/traps/tinyness_before mirror the platform's float behavior.
  static inline constexpr const int radix = std::numeric_limits<float>::radix;
  static inline constexpr const bool traps = std::numeric_limits<float>::traps;
  static inline constexpr const bool tinyness_before =
      std::numeric_limits<float>::tinyness_before;
};
// numeric_limits for Float8e4m3fn: bias 7, 3 mantissa bits, no infinity,
// single NaN encoding 0b?'1111'111.
struct numeric_limits_float8_e4m3fn : public numeric_limits_float8_base {
 private:
  static inline constexpr const int kExponentBias = 7;
  static inline constexpr const int kMantissaBits = 3;
 public:
  static inline constexpr const int digits = kMantissaBits + 1;
  static inline constexpr const int digits10 = Digits10FromDigits(digits);
  static inline constexpr const int max_digits10 =
      MaxDigits10FromDigits(digits);
  static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
  static inline constexpr const int min_exponent10 =
      MinExponent10FromMinExponent(min_exponent);
  // Consistency fix: use kExponentBias rather than a literal 7 (same value),
  // matching the sibling numeric_limits_float8_* structs.
  static inline constexpr const int max_exponent =
      (0b1111 - kExponentBias) + 1;
  static inline constexpr const int max_exponent10 =
      MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
  static inline constexpr const bool is_iec559 = false;
  static inline constexpr const bool has_infinity = false;
  static inline constexpr const bool has_signaling_NaN = false;
  static constexpr Float8e4m3fn min() {
    return Float8e4m3fn::FromRep(0b0'0001 << kMantissaBits);
  }
  static constexpr Float8e4m3fn lowest() {
    return Float8e4m3fn::FromRep(0b1'1111'110);
  }
  static constexpr Float8e4m3fn max() {
    return Float8e4m3fn::FromRep(0b0'1111'110);
  }
  static constexpr Float8e4m3fn epsilon() {
    return Float8e4m3fn::FromRep((-kMantissaBits + kExponentBias)
                                 << kMantissaBits);
  }
  static constexpr Float8e4m3fn round_error() {
    return Float8e4m3fn::FromRep((-1 + kExponentBias) << kMantissaBits);
  }
  // The format has no infinity; infinity() and both NaN accessors all
  // return the single NaN encoding.
  static constexpr Float8e4m3fn infinity() {
    return Float8e4m3fn::FromRep(0b0'1111'111);
  }
  static constexpr Float8e4m3fn quiet_NaN() {
    return Float8e4m3fn::FromRep(0b0'1111'111);
  }
  static constexpr Float8e4m3fn signaling_NaN() {
    return Float8e4m3fn::FromRep(0b0'1111'111);
  }
  static constexpr Float8e4m3fn denorm_min() {
    return Float8e4m3fn::FromRep(0b0'0000'001);
  }
};
// numeric_limits for Float8e4m3b11fnuz: bias 11, 3 mantissa bits, no
// infinity, NaN encoded as 0x80.
struct numeric_limits_float8_e4m3b11fnuz : public numeric_limits_float8_base {
 private:
  static inline constexpr const int kExponentBias = 11;
  static inline constexpr const int kMantissaBits = 3;
 public:
  static inline constexpr const int digits = kMantissaBits + 1;
  static inline constexpr const int digits10 = Digits10FromDigits(digits);
  static inline constexpr const int max_digits10 =
      MaxDigits10FromDigits(digits);
  static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
  static inline constexpr const int min_exponent10 =
      MinExponent10FromMinExponent(min_exponent);
  static inline constexpr const int max_exponent =
      (0b1111 - kExponentBias) + 1;
  static inline constexpr const int max_exponent10 =
      MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
  static inline constexpr const bool is_iec559 = false;
  static inline constexpr const bool has_infinity = false;
  static inline constexpr const bool has_signaling_NaN = false;
  static constexpr Float8e4m3b11fnuz min() {
    return Float8e4m3b11fnuz::FromRep(1 << kMantissaBits);
  }
  // 0xFF is a valid finite value here: only 0x80 is reserved for NaN.
  static constexpr Float8e4m3b11fnuz lowest() {
    return Float8e4m3b11fnuz::FromRep(0b1'1111'111);
  }
  static constexpr Float8e4m3b11fnuz max() {
    return Float8e4m3b11fnuz::FromRep(0b0'1111'111);
  }
  static constexpr Float8e4m3b11fnuz epsilon() {
    return Float8e4m3b11fnuz::FromRep((-kMantissaBits + kExponentBias)
                                      << kMantissaBits);
  }
  static constexpr Float8e4m3b11fnuz round_error() {
    return Float8e4m3b11fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
  }
  // No infinity; infinity() and both NaN accessors return the NaN encoding.
  static constexpr Float8e4m3b11fnuz infinity() {
    return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
  }
  static constexpr Float8e4m3b11fnuz quiet_NaN() {
    return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
  }
  static constexpr Float8e4m3b11fnuz signaling_NaN() {
    return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
  }
  static constexpr Float8e4m3b11fnuz denorm_min() {
    return Float8e4m3b11fnuz::FromRep(0b0'0000'001);
  }
};
// numeric_limits for Float8e4m3fnuz: bias 8, 3 mantissa bits, no infinity,
// NaN encoded as 0x80.
struct numeric_limits_float8_e4m3fnuz : public numeric_limits_float8_base {
 private:
  static inline constexpr const int kExponentBias = 8;
  static inline constexpr const int kMantissaBits = 3;
 public:
  static inline constexpr const int digits = kMantissaBits + 1;
  static inline constexpr const int digits10 = Digits10FromDigits(digits);
  static inline constexpr const int max_digits10 =
      MaxDigits10FromDigits(digits);
  static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
  static inline constexpr const int min_exponent10 =
      MinExponent10FromMinExponent(min_exponent);
  static inline constexpr const int max_exponent =
      (0b1111 - kExponentBias) + 1;
  static inline constexpr const int max_exponent10 =
      MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
  static inline constexpr const bool is_iec559 = false;
  static inline constexpr const bool has_infinity = false;
  static inline constexpr const bool has_signaling_NaN = false;
  static constexpr Float8e4m3fnuz min() {
    return Float8e4m3fnuz::FromRep(0x08);
  }
  // 0xFF is a valid finite value: only 0x80 is reserved for NaN.
  static constexpr Float8e4m3fnuz lowest() {
    return Float8e4m3fnuz::FromRep(0xFF);
  }
  static constexpr Float8e4m3fnuz max() {
    return Float8e4m3fnuz::FromRep(0x7F);
  }
  static constexpr Float8e4m3fnuz epsilon() {
    return Float8e4m3fnuz::FromRep((-kMantissaBits + kExponentBias)
                                   << kMantissaBits);
  }
  static constexpr Float8e4m3fnuz round_error() {
    return Float8e4m3fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
  }
  // No infinity; infinity() and both NaN accessors return the NaN encoding.
  static constexpr Float8e4m3fnuz infinity() {
    return Float8e4m3fnuz::FromRep(0x80);
  }
  static constexpr Float8e4m3fnuz quiet_NaN() {
    return Float8e4m3fnuz::FromRep(0x80);
  }
  static constexpr Float8e4m3fnuz signaling_NaN() {
    return Float8e4m3fnuz::FromRep(0x80);
  }
  static constexpr Float8e4m3fnuz denorm_min() {
    return Float8e4m3fnuz::FromRep(0x01);
  }
};
// numeric_limits for Float8e5m2: bias 15, 2 mantissa bits; IEEE-style with
// real infinities and distinct quiet/signaling NaN encodings.
struct numeric_limits_float8_e5m2 : public numeric_limits_float8_base {
 private:
  static inline constexpr const int kExponentBias = 15;
  static inline constexpr const int kMantissaBits = 2;
 public:
  static inline constexpr const int digits = kMantissaBits + 1;
  static inline constexpr const int digits10 = Digits10FromDigits(digits);
  static inline constexpr const int max_digits10 =
      MaxDigits10FromDigits(digits);
  static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
  static inline constexpr const int min_exponent10 =
      MinExponent10FromMinExponent(min_exponent);
  static inline constexpr const int max_exponent = 0b11111 - kExponentBias;
  static inline constexpr const int max_exponent10 =
      MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
  static inline constexpr const bool is_iec559 = true;
  static inline constexpr const bool has_infinity = true;
  static inline constexpr const bool has_signaling_NaN = true;
  static constexpr Float8e5m2 min() {
    return Float8e5m2::FromRep(1 << kMantissaBits);
  }
  // Largest finite magnitudes use exponent 11110; 11111 is inf/NaN.
  static constexpr Float8e5m2 lowest() {
    return Float8e5m2::FromRep(0b1'11110'11);
  }
  static constexpr Float8e5m2 max() {
    return Float8e5m2::FromRep(0b0'11110'11);
  }
  static constexpr Float8e5m2 epsilon() {
    return Float8e5m2::FromRep((-kMantissaBits + kExponentBias)
                               << kMantissaBits);
  }
  static constexpr Float8e5m2 round_error() {
    return Float8e5m2::FromRep((-1 + kExponentBias) << kMantissaBits);
  }
  static constexpr Float8e5m2 infinity() {
    return Float8e5m2::FromRep(0b0'11111'00);
  }
  static constexpr Float8e5m2 quiet_NaN() {
    return Float8e5m2::FromRep(0b0'11111'10);
  }
  static constexpr Float8e5m2 signaling_NaN() {
    return Float8e5m2::FromRep(0b0'11111'01);
  }
  static constexpr Float8e5m2 denorm_min() {
    return Float8e5m2::FromRep(0b0'00000'01);
  }
};
// numeric_limits for Float8e5m2fnuz: bias 16, 2 mantissa bits, no infinity,
// NaN encoded as 0x80.
struct numeric_limits_float8_e5m2fnuz : public numeric_limits_float8_base {
 private:
  static inline constexpr const int kExponentBias = 16;
  static inline constexpr const int kMantissaBits = 2;
 public:
  static inline constexpr const int digits = kMantissaBits + 1;
  static inline constexpr const int digits10 = Digits10FromDigits(digits);
  static inline constexpr const int max_digits10 =
      MaxDigits10FromDigits(digits);
  static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
  static inline constexpr const int min_exponent10 =
      MinExponent10FromMinExponent(min_exponent);
  static inline constexpr const int max_exponent =
      (0b11111 - kExponentBias) + 1;
  static inline constexpr const int max_exponent10 =
      MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
  static inline constexpr const bool is_iec559 = false;
  static inline constexpr const bool has_infinity = false;
  static inline constexpr const bool has_signaling_NaN = false;
  static constexpr Float8e5m2fnuz min() {
    return Float8e5m2fnuz::FromRep(0x04);
  }
  // 0xFF is a valid finite value: only 0x80 is reserved for NaN.
  static constexpr Float8e5m2fnuz lowest() {
    return Float8e5m2fnuz::FromRep(0xFF);
  }
  static constexpr Float8e5m2fnuz max() {
    return Float8e5m2fnuz::FromRep(0x7F);
  }
  static constexpr Float8e5m2fnuz epsilon() {
    return Float8e5m2fnuz::FromRep((-kMantissaBits + kExponentBias)
                                   << kMantissaBits);
  }
  static constexpr Float8e5m2fnuz round_error() {
    return Float8e5m2fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
  }
  // No infinity; infinity() and both NaN accessors return the NaN encoding.
  static constexpr Float8e5m2fnuz infinity() {
    return Float8e5m2fnuz::FromRep(0x80);
  }
  static constexpr Float8e5m2fnuz quiet_NaN() {
    return Float8e5m2fnuz::FromRep(0x80);
  }
  static constexpr Float8e5m2fnuz signaling_NaN() {
    return Float8e5m2fnuz::FromRep(0x80);
  }
  static constexpr Float8e5m2fnuz denorm_min() {
    return Float8e5m2fnuz::FromRep(0x01);
  }
};
}
}
// Hook each 8-bit float type into std::numeric_limits via the structs above.
namespace std {
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3fn>
    : public tensorstore::float8_internal::numeric_limits_float8_e4m3fn {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3b11fnuz>
    : public tensorstore::float8_internal::numeric_limits_float8_e4m3b11fnuz {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3fnuz>
    : public tensorstore::float8_internal::numeric_limits_float8_e4m3fnuz {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e5m2>
    : public tensorstore::float8_internal::numeric_limits_float8_e5m2 {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e5m2fnuz>
    : public tensorstore::float8_internal::numeric_limits_float8_e5m2fnuz {};
}
namespace tensorstore {
namespace float8_internal {
// Magnitude and NaN predicates for each format.  The `(isnan)` declarators
// are parenthesized so that a possible `isnan` function-like macro from
// <math.h> does not expand here.
constexpr inline Float8e4m3fn abs(const Float8e4m3fn& a) {
  return Float8e4m3fn::FromRep(a.rep() & 0b0'1111'111);
}
constexpr inline bool(isnan)(const Float8e4m3fn& a) {
  return abs(a).rep() == std::numeric_limits<Float8e4m3fn>::quiet_NaN().rep();
}
// For fnuz/b11 formats 0x80 is NaN rather than -0, so abs() leaves a
// zero-magnitude rep unchanged instead of clearing the sign bit.
constexpr inline Float8e4m3b11fnuz abs(const Float8e4m3b11fnuz& a) {
  return (a.rep() & 0b0'1111'111) == 0
             ? Float8e4m3b11fnuz::FromRep(a.rep())
             : Float8e4m3b11fnuz::FromRep(a.rep() & 0b0'1111'111);
}
constexpr inline bool(isnan)(const Float8e4m3b11fnuz& a) {
  return a.rep() == std::numeric_limits<Float8e4m3b11fnuz>::quiet_NaN().rep();
}
constexpr inline Float8e4m3fnuz abs(const Float8e4m3fnuz& a) {
  return (a.rep() & 0x7F) == 0 ? Float8e4m3fnuz::FromRep(a.rep())
                               : Float8e4m3fnuz::FromRep(a.rep() & 0x7F);
}
constexpr inline bool(isnan)(const Float8e4m3fnuz& a) {
  return abs(a).rep() == std::numeric_limits<Float8e4m3fnuz>::quiet_NaN().rep();
}
constexpr inline Float8e5m2 abs(const Float8e5m2& a) {
  return Float8e5m2::FromRep(a.rep() & 0b0'11111'11);
}
// e5m2 has a real infinity: NaN is any magnitude above the infinity rep.
constexpr inline bool(isnan)(const Float8e5m2& a) {
  return abs(a).rep() > std::numeric_limits<Float8e5m2>::infinity().rep();
}
constexpr inline Float8e5m2fnuz abs(const Float8e5m2fnuz& a) {
  return (a.rep() & 0x7F) == 0 ? Float8e5m2fnuz::FromRep(a.rep())
                               : Float8e5m2fnuz::FromRep(a.rep() & 0x7F);
}
constexpr inline bool isnan(const Float8e5m2fnuz& a) { return a.rep() == 0x80; }
// True iff `a` encodes an infinity; formats without an infinity
// representation always report false.
template <typename Float8>
constexpr inline bool(isinf)(const Float8Base<Float8>& a) {
  if (!std::numeric_limits<Float8>::has_infinity) {
    return false;
  }
  return abs(a.derived()).rep() ==
         std::numeric_limits<Float8>::infinity().rep();
}
// True iff `a` is neither NaN nor an infinity.
template <typename Float8>
constexpr inline bool(isfinite)(const Float8Base<Float8>& a) {
  return !(isnan(a.derived()) || isinf(a.derived()));
}
// Streams the value through its float conversion.
template <typename Float8>
std::ostream& operator<<(std::ostream& os, const Float8Base<Float8>& f8) {
  return os << static_cast<float>(f8.derived());
}
// Maps a byte width to the correspondingly sized integer types.  The
// unspecialized template yields `void` for unsupported sizes.
template <size_t Size>
struct get_integer_by_size {
  using signed_type = void;
  using unsigned_type = void;
};
template <>
struct get_integer_by_size<1> {
  using signed_type = int8_t;
  using unsigned_type = uint8_t;
};
template <>
struct get_integer_by_size<2> {
  using signed_type = int16_t;
  using unsigned_type = uint16_t;
};
template <>
struct get_integer_by_size<4> {
  using signed_type = int32_t;
  using unsigned_type = uint32_t;
};
template <>
struct get_integer_by_size<8> {
  using signed_type = int64_t;
  using unsigned_type = uint64_t;
};
// Convenience alias for the unsigned integer of width `kNumBytes` bytes.
template <int kNumBytes>
using GetUnsignedInteger =
    typename get_integer_by_size<kNumBytes>::unsigned_type;
// Conversion dispatcher between scalar types, specialized per (From, To)
// pair later in the file.  kSaturate/kTruncate select the out-of-range and
// rounding behavior of those specializations.
template <typename From, typename To, bool kSaturate, bool kTruncate,
          typename EnableIf = void>
struct ConvertImpl;
// Same-type conversion is the identity regardless of the flags.
template <typename Scalar>
struct IdentityConversion {
  static inline Scalar run(const Scalar& from) { return from; }
};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, false, false,
                   void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, false, true,
                   void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, true, false,
                   void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, true, true,
                   void> : public IdentityConversion<Scalar> {};
// Bit-layout facts for an IEEE-style float type, derived from its size and
// std::numeric_limits: field widths, masks, and the conventional exponent
// bias 2^(kExponentBits-1) - 1.
template <typename Float>
struct TraitsBase {
  using BitsType = GetUnsignedInteger<sizeof(Float)>;
  static constexpr int kBits = sizeof(Float) * CHAR_BIT;
  // `digits` counts the implicit leading one, hence the -1.
  static constexpr int kMantissaBits = std::numeric_limits<Float>::digits - 1;
  static constexpr int kExponentBits = kBits - kMantissaBits - 1;
  static constexpr BitsType kExponentMask = ((BitsType{1} << kExponentBits) - 1)
                                            << kMantissaBits;
  static constexpr BitsType kMantissaMask = (BitsType{1} << kMantissaBits) - 1;
  static constexpr int kExponentBias = (1 << (kExponentBits - 1)) - 1;
};
// Per-format overrides of the default layout.  The b11 format uses a fixed
// bias of 11, and the fnuz formats use a bias one larger than the
// IEEE-style default computed by TraitsBase.
template <typename Float>
struct Traits : public TraitsBase<Float> {};
template <>
struct Traits<Float8e4m3b11fnuz> : public TraitsBase<Float8e4m3b11fnuz> {
  static constexpr int kExponentBias = 11;
};
template <>
struct Traits<Float8e4m3fnuz> : public TraitsBase<Float8e4m3fnuz> {
  using Base = TraitsBase<Float8e4m3fnuz>;
  static constexpr int kExponentBias = Base::kExponentBias + 1;
};
template <>
struct Traits<Float8e5m2fnuz> : public TraitsBase<Float8e5m2fnuz> {
  using Base = TraitsBase<Float8e5m2fnuz>;
  static constexpr int kExponentBias = Base::kExponentBias + 1;
};
// Rounds the low `roundoff` bits of `bits` using round-half-to-even: add
// (half - 1) plus the LSB of the part that will be kept, so exact halves
// round toward an even result.  The caller shifts the low bits out
// afterwards.  May carry into higher fields (that is how rounding can bump
// the exponent).
template <typename Bits>
constexpr inline Bits RoundBitsToNearestEven(Bits bits, int roundoff) {
  if (roundoff == 0) return bits;
  const Bits kept_lsb = (bits >> roundoff) & 1;
  const Bits half_minus_one = (Bits{1} << (roundoff - 1)) - 1;
  return bits + kept_lsb + half_minus_one;
}
#if (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
using std::countl_zero;
#else
// Portable fallback for std::countl_zero(uint64_t): binary search that
// shifts the highest set bit up toward bit 63, accumulating the zero count.
static constexpr inline int countl_zero(uint64_t x) {
  if (x == 0) return 64;
  int n = 0;
  if ((x >> 32) == 0) { n += 32; x <<= 32; }
  if ((x >> 48) == 0) { n += 16; x <<= 16; }
  if ((x >> 56) == 0) { n += 8;  x <<= 8; }
  if ((x >> 60) == 0) { n += 4;  x <<= 4; }
  if ((x >> 62) == 0) { n += 2;  x <<= 2; }
  if ((x >> 63) == 0) { n += 1; }
  return n;
}
// Portable fallback for std::countl_zero(uint32_t); see the uint64_t
// overload for the approach.
static constexpr inline int countl_zero(uint32_t x) {
  if (x == 0) return 32;
  int n = 0;
  if ((x >> 16) == 0) { n += 16; x <<= 16; }
  if ((x >> 24) == 0) { n += 8;  x <<= 8; }
  if ((x >> 28) == 0) { n += 4;  x <<= 4; }
  if ((x >> 30) == 0) { n += 2;  x <<= 2; }
  if ((x >> 31) == 0) { n += 1; }
  return n;
}
// Portable fallback for std::countl_zero(uint16_t); see the uint64_t
// overload for the approach.  Shifts go through int (integer promotion),
// so narrow back explicitly.
static constexpr inline int countl_zero(uint16_t x) {
  if (x == 0) return 16;
  int n = 0;
  if ((x >> 8) == 0)  { n += 8; x = static_cast<uint16_t>(x << 8); }
  if ((x >> 12) == 0) { n += 4; x = static_cast<uint16_t>(x << 4); }
  if ((x >> 14) == 0) { n += 2; x = static_cast<uint16_t>(x << 2); }
  if ((x >> 15) == 0) { n += 1; }
  return n;
}
// Portable fallback for std::countl_zero(uint8_t); see the uint64_t
// overload for the approach.
static constexpr inline int countl_zero(uint8_t x) {
  if (x == 0) return 8;
  int n = 0;
  if ((x >> 4) == 0) { n += 4; x = static_cast<uint8_t>(x << 4); }
  if ((x >> 6) == 0) { n += 2; x = static_cast<uint8_t>(x << 2); }
  if ((x >> 7) == 0) { n += 1; }
  return n;
}
#endif
// Generic bit-level conversion between two *different* float formats.  It
// operates on the raw integer representations: re-biases the exponent,
// shifts the mantissa into place, handles subnormals on both sides, rounds
// to nearest-even (unless kTruncate), and on overflow either saturates to
// To's max (kSaturate) or produces To's infinity.
template <typename From, typename To, bool kSaturate, bool kTruncate>
struct ConvertImpl<From, To, kSaturate, kTruncate,
                   std::enable_if_t<!std::is_same_v<From, To>>> {
  using FromTraits = Traits<From>;
  using FromBits = typename FromTraits::BitsType;
  static constexpr int kFromBits = FromTraits::kBits;
  static constexpr int kFromMantissaBits = FromTraits::kMantissaBits;
  static constexpr int kFromExponentBits = FromTraits::kExponentBits;
  static constexpr int kFromExponentBias = FromTraits::kExponentBias;
  static constexpr FromBits kFromExponentMask = FromTraits::kExponentMask;
  using ToTraits = Traits<To>;
  using ToBits = typename ToTraits::BitsType;
  static constexpr int kToBits = ToTraits::kBits;
  static constexpr int kToMantissaBits = ToTraits::kMantissaBits;
  static constexpr int kToExponentBits = ToTraits::kExponentBits;
  static constexpr int kToExponentBias = ToTraits::kExponentBias;
  static constexpr ToBits kToExponentMask = ToTraits::kExponentMask;
  // `WideBits`: an unsigned type wide enough to hold intermediate values of
  // either format (widest mantissa plus widest exponent).
  static constexpr int kWideBits =
      (std::max(kToMantissaBits, kFromMantissaBits)) +
      (std::max(kToExponentBits, kFromExponentBits));
  static constexpr int kWideBytes = (kWideBits + (CHAR_BIT - 1)) / CHAR_BIT;
  using WideBits = GetUnsignedInteger<kWideBytes>;
  // kExponentOffset: value to add to the biased exponent when re-biasing.
  // kDigitShift: left-shift applied to the mantissa (negative means a right
  // shift, which drops precision and therefore requires rounding).
  static constexpr int kExponentOffset = kToExponentBias - kFromExponentBias;
  static constexpr int kDigitShift = kToMantissaBits - kFromMantissaBits;
  static inline To run(const From& from) {
    using std::abs;
    using std::isinf;
    using std::isnan;
    // Strip the sign up front and re-apply it at each return.
    const bool from_sign_bit =
        absl::bit_cast<FromBits>(from) >> (kFromBits - 1);
    const FromBits from_bits = absl::bit_cast<FromBits>(abs(from));
    // Special values first: infinity, NaN, and (signed) zero.
    if (isinf(from)) {
      return from_sign_bit ? -std::numeric_limits<To>::infinity()
                           : std::numeric_limits<To>::infinity();
    }
    if (isnan(from)) {
      return from_sign_bit ? -std::numeric_limits<To>::quiet_NaN()
                           : std::numeric_limits<To>::quiet_NaN();
    }
    if (from_bits == 0) {
      return from_sign_bit ? -To{} : To{};
    }
    const int biased_from_exponent = from_bits >> kFromMantissaBits;
    // Case 1: `To` reaches smaller exponents than `From`, so a subnormal
    // input may become a normal (or remain subnormal) in `To`.
    if constexpr (std::numeric_limits<To>::min_exponent <
                  std::numeric_limits<From>::min_exponent) {
      if (biased_from_exponent == 0) {
        WideBits bits = from_bits;
        // Position of the leading set mantissa bit determines how far the
        // value must be renormalized.
        const int normalization_factor =
            countl_zero(from_bits) - (kFromBits - kFromMantissaBits) + 1;
        const int biased_exponent = kExponentOffset - normalization_factor + 1;
        if (biased_exponent <= 0) {
          // Result stays subnormal in `To`: just align by the bias
          // difference (guard avoids UB from an over-wide shift).
          if constexpr (kExponentOffset < sizeof(WideBits) * CHAR_BIT) {
            bits <<= kExponentOffset;
          }
        } else {
          // Result is normal in `To`: shift the leading one into the
          // implicit position and install the new biased exponent.
          bits <<= normalization_factor;
          bits &= ~(WideBits{1} << kFromMantissaBits);
          bits |= static_cast<WideBits>(biased_exponent) << kFromMantissaBits;
        }
        // Align mantissa widths, rounding if precision is dropped.
        if constexpr (kDigitShift > 0) {
          bits <<= kDigitShift;
        } else {
          if constexpr (!kTruncate) {
            bits = RoundBitsToNearestEven(bits, -kDigitShift);
          }
          bits >>= -kDigitShift;
        }
        To to = absl::bit_cast<To>(static_cast<ToBits>(bits));
        return from_sign_bit ? -to : to;
      }
    }
    // Case 2: `To`'s smallest normal exponent is larger than `From`'s, so a
    // small normal input may map to a `To` subnormal (or underflow to 0).
    if constexpr (std::numeric_limits<To>::min_exponent >
                  std::numeric_limits<From>::min_exponent) {
      const int unbiased_exponent = biased_from_exponent - kFromExponentBias;
      const int biased_to_exponent = unbiased_exponent + kToExponentBias;
      if (biased_to_exponent <= 0) {
        // Restore the implicit leading one (input may itself be subnormal),
        // then shift right by the exponent deficit, rounding the lost bits.
        FromBits from_has_leading_one = (biased_from_exponent > 0 ? 1 : 0);
        int exponent_shift =
            -kDigitShift - biased_to_exponent + from_has_leading_one;
        FromBits rounded_from_bits =
            (from_bits & FromTraits::kMantissaMask) |
            (from_has_leading_one << kFromMantissaBits);
        ToBits bits = 0;
        // Shifts past the whole significand flush to zero (bits stays 0).
        if (exponent_shift <= kFromMantissaBits + 1) {
          if constexpr (!kTruncate) {
            rounded_from_bits =
                RoundBitsToNearestEven(rounded_from_bits, exponent_shift);
          }
          bits = (rounded_from_bits >> exponent_shift);
        }
        To to = absl::bit_cast<To>(bits);
        return from_sign_bit ? -to : to;
      }
    }
    // Common path: input maps to a normal value in `To` (or overflows).
    WideBits rounded_from_bits = from_bits;
    if constexpr (kDigitShift < 0) {
      // Round away the extra mantissa bits first; carries may propagate
      // into the exponent field, which is exactly what we want.
      if constexpr (!kTruncate) {
        rounded_from_bits = RoundBitsToNearestEven(from_bits, -kDigitShift);
      }
      rounded_from_bits &= ~((WideBits{1} << (-kDigitShift)) - 1);
    }
    // Re-bias the exponent by adding the offset directly in the bits.
    rounded_from_bits += static_cast<WideBits>(kExponentOffset)
                         << kFromMantissaBits;
    ToBits bits;
    // `To`'s largest finite value, aligned to the current mantissa width,
    // serves as the overflow threshold.
    const WideBits kToHighestRep =
        absl::bit_cast<ToBits>(std::numeric_limits<To>::max());
    WideBits aligned_highest{kToHighestRep};
    if constexpr (kDigitShift < 0) {
      aligned_highest <<= -kDigitShift;
      bits = static_cast<ToBits>(rounded_from_bits >> -kDigitShift);
    } else if constexpr (kDigitShift >= 0) {
      rounded_from_bits <<= kDigitShift;
      bits = ToBits{rounded_from_bits};
    }
    To to = absl::bit_cast<To>(bits);
    // Overflow is only possible when `To` has a strictly smaller
    // (max_exponent, digits) range than `From` (lexicographic compare).
    if constexpr (std::make_pair(std::numeric_limits<To>::max_exponent,
                                 std::numeric_limits<To>::digits) <
                  std::make_pair(std::numeric_limits<From>::max_exponent,
                                 std::numeric_limits<From>::digits)) {
      if (rounded_from_bits > aligned_highest) {
        to = kSaturate ? std::numeric_limits<To>::max()
                       : std::numeric_limits<To>::infinity();
      }
    }
    return from_sign_bit ? -to : to;
  }
};
// e4m3fn -> e5m2 cannot overflow (e5m2's exponent range is wider), so the
// saturating variant simply forwards to the non-saturating conversion.
template <bool kTruncate>
struct ConvertImpl<Float8e4m3fn, Float8e5m2, true, kTruncate> {
  static inline Float8e5m2 run(const Float8e4m3fn& from) {
    return ConvertImpl<Float8e4m3fn, Float8e5m2, false, kTruncate>::run(from);
  }
};
// half (binary16) -> e5m2: the two formats share the exponent layout, so
// conversion is just dropping the low 8 mantissa bits of the 16-bit
// representation, plus rounding/saturation as requested.
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<::half_float::half, Float8e5m2, kSaturate, kTruncate> {
  static inline Float8e5m2 run(const ::half_float::half& from) {
    uint16_t from_bits = absl::bit_cast<uint16_t>(from);
    // Special values: +/-inf (0x7C00) truncates exactly; for NaN the
    // payload could vanish in the shift, so force a mantissa bit to keep
    // the result a NaN.
    uint16_t abs_bits = from_bits & 0x7FFF;
    if (abs_bits == 0x7C00) {
      return Float8e5m2::FromRep(from_bits >> 8);
    } else if (abs_bits > 0x7C00) {
      return Float8e5m2::FromRep((from_bits >> 8) | 0b0'00000'10);
    }
    if constexpr (!kTruncate) {
      from_bits = RoundBitsToNearestEven(from_bits, 8);
      // Rounding can carry into the exponent and overflow to infinity;
      // clamp back to +/-max when saturating.
      if constexpr (kSaturate) {
        const Float8e5m2 kHighest = std::numeric_limits<Float8e5m2>::max();
        if ((from_bits & 0x7F00) > static_cast<uint16_t>(kHighest.rep()) << 8) {
          const bool from_sign_bit = from_bits >> 15;
          return from_sign_bit ? -kHighest : kHighest;
        }
      }
    }
    return Float8e5m2::FromRep(from_bits >> 8);
  }
};
// e5m2 -> half is exact: widen the byte into the top of the 16-bit
// representation.  NOTE(review): this full <false, false> specialization
// has the same body as the partial specialization below, which already
// covers all flag combinations; it looks redundant but is harmless.
template <>
struct ConvertImpl<Float8e5m2, ::half_float::half, false,
                   false> {
  static inline ::half_float::half run(const Float8e5m2& from) {
    return absl::bit_cast<::half_float::half>(
        static_cast<uint16_t>(static_cast<uint16_t>(from.rep()) << 8));
  }
};
// Saturation/truncation flags are irrelevant for this exact widening
// conversion.
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<Float8e5m2, ::half_float::half, kSaturate, kTruncate> {
  static inline ::half_float::half run(const Float8e5m2& from) {
    return absl::bit_cast<::half_float::half>(
        static_cast<uint16_t>(static_cast<uint16_t>(from.rep()) << 8));
  }
};
// e5m2fnuz -> half goes through float: every e5m2fnuz value is exactly
// representable as a float, and float -> half then applies the ordinary
// conversion.
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<Float8e5m2fnuz, ::half_float::half, kSaturate, kTruncate> {
  static inline ::half_float::half run(const Float8e5m2fnuz& from) {
    const float widened = static_cast<float>(from);
    return static_cast<::half_float::half>(widened);
  }
};
// Out-of-line definitions of the Float8Base conversion entry points; all
// the real work happens in the ConvertImpl specializations above.
template <typename Derived>
template <bool kSaturate, bool kTruncate, typename From>
Derived Float8Base<Derived>::ConvertFrom(const From& from) {
  return ConvertImpl<From, Derived, kSaturate, kTruncate>::run(from);
}
template <typename Derived>
template <typename To, bool kSaturate, bool kTruncate>
To Float8Base<Derived>::ConvertTo(const Derived& from) {
  return ConvertImpl<Derived, To, kSaturate, kTruncate>::run(from);
}
#ifdef _MSC_VER
// NOTE(review): MSVC-only fpclassify overloads for each float8 type --
// presumably MSVC's fpclassify does not resolve for these class types via
// the generic mechanism; confirm against the original commit.  Comments
// cannot be placed inside the macro body (they would break the line
// continuations).
#define TENSORSTORE_INTERNAL_FPCLASSIFY(Float8)                     \
  inline int fpclassify(Float8 a) noexcept {                        \
    if (tensorstore::float8_internal::isnan(a)) return FP_NAN;      \
    if (tensorstore::float8_internal::isinf(a)) return FP_INFINITE; \
    Float8 abs_value = tensorstore::float8_internal::abs(a);        \
    if (abs_value.rep() == 0x00) return FP_ZERO;                    \
    if ((abs_value.rep() & Traits<Float8>::kExponentMask) == 0)     \
      return FP_SUBNORMAL;                                          \
    return FP_NORMAL;                                               \
  }
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3fn);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3fnuz);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3b11fnuz);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e5m2);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e5m2fnuz);
#undef TENSORSTORE_INTERNAL_FPCLASSIFY
#endif
}
// Public aliases: expose the float8 types at tensorstore namespace scope.
using Float8e4m3fn = float8_internal::Float8e4m3fn;
using Float8e4m3fnuz = float8_internal::Float8e4m3fnuz;
using Float8e4m3b11fnuz = float8_internal::Float8e4m3b11fnuz;
using Float8e5m2 = float8_internal::Float8e5m2;
using Float8e5m2fnuz = float8_internal::Float8e5m2fnuz;
}
#endif | #include "tensorstore/util/float8.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/strings/str_cat.h"
#include <half.hpp>
#include "tensorstore/util/bfloat16.h"
namespace tensorstore {
namespace {
using std::isfinite;
using std::isinf;
using std::isnan;
// Typed-test fixture instantiated once per float8 format under test.
template <typename Float8_>
class Float8Test : public ::testing::Test {};
// Human-readable suite names for each type parameter; falls back to the
// parameter index for an unrecognized type.
struct Float8TestParamNames {
  template <typename TypeParam>
  static std::string GetName(int idx) {
    if constexpr (std::is_same_v<TypeParam, Float8e4m3fn>) {
      return "Float8e4m3fn";
    } else if constexpr (std::is_same_v<TypeParam, Float8e4m3b11fnuz>) {
      return "Float8e4m3b11fnuz";
    } else if constexpr (std::is_same_v<TypeParam, Float8e5m2>) {
      return "Float8e5m2";
    } else if constexpr (std::is_same_v<TypeParam, Float8e4m3fnuz>) {
      return "Float8e4m3fnuz";
    } else if constexpr (std::is_same_v<TypeParam, Float8e5m2fnuz>) {
      return "Float8e5m2fnuz";
    }
    return absl::StrCat(idx);
  }
};
using Float8Types =
    ::testing::Types<Float8e4m3fn, Float8e5m2, Float8e4m3b11fnuz,
                     Float8e4m3fnuz, Float8e5m2fnuz>;
TYPED_TEST_SUITE(Float8Test, Float8Types, Float8TestParamNames);
TEST(Float8E4m3fnTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::min()),
std::exp2(-6));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::max()), 448);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::lowest()),
-448);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fn>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::infinity()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::denorm_min()),
std::exp2(-9));
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::min_exponent, -5);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::min_exponent10, -1);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_exponent, 9);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_exponent10, 2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::has_signaling_NaN, false);
}
TEST(Float8E4m3b11fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::min()),
std::exp2(-10));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()),
30);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::lowest()),
-30);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::denorm_min()),
std::exp2(-13));
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::min_exponent, -9);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::min_exponent10, -3);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_exponent, 5);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_exponent10, 1);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::has_signaling_NaN, false);
}
TEST(Float8E4m3fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::min()),
std::exp2(-7));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::max()),
240);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::lowest()),
-240);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::denorm_min()),
std::exp2(-10));
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::min_exponent, -6);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::min_exponent10, -2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_exponent, 8);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_exponent10, 2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::has_signaling_NaN, false);
}
TEST(Float8E5m2Test, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::min()),
std::exp2(-14));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::max()), 57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::lowest()),
-57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::epsilon()),
0.25);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::round_error()),
0.5);
EXPECT_TRUE(isinf(std::numeric_limits<Float8e5m2>::infinity()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::denorm_min()),
std::exp2(-16));
EXPECT_EQ(std::numeric_limits<Float8e5m2>::digits, 3);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_digits10, 2);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::min_exponent, -13);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::min_exponent10, -4);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_exponent, 16);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_exponent10, 4);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::is_iec559, true);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::has_infinity, true);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::has_signaling_NaN, true);
}
TEST(Float8E5m2fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::min()),
std::exp2(-15));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::max()),
57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::lowest()),
-57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::epsilon()),
0.25);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::denorm_min()),
std::exp2(-17));
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::digits, 3);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_digits10, 2);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::min_exponent, -14);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::min_exponent10, -4);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_exponent, 16);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_exponent10, 4);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::has_signaling_NaN, false);
}
// FromRep/rep must round-trip the raw byte unchanged.
TYPED_TEST(Float8Test, FromRep) {
  using Float8 = TypeParam;
  Float8 x = Float8::FromRep(0x4F);
  EXPECT_EQ(x.rep(), 0x4F);
}
// Unary minus flips the sign bit on an ordinary value, and negating a
// quiet NaN must still be NaN.
TYPED_TEST(Float8Test, Negate) {
  using Float8 = TypeParam;
  Float8 x = -Float8::FromRep(0x4F);
  EXPECT_EQ(x.rep(), 0x80 | 0x4F);
  Float8 nan = -std::numeric_limits<Float8>::quiet_NaN();
  EXPECT_TRUE(isnan(nan));
}
// Each float8 type is byte-sized and bit-castable to/from uint8_t.
TYPED_TEST(Float8Test, BitCasts) {
  using Float8 = TypeParam;
  Float8 x = Float8::FromRep(0x47);
  EXPECT_EQ(absl::bit_cast<uint8_t>(x), 0x47);
  EXPECT_EQ(absl::bit_cast<Float8>(x.rep()).rep(), 0x47);
}
TYPED_TEST(Float8Test, UpCasts) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 f8 = Float8::FromRep(i);
double f64 = static_cast<double>(f8);
float f32 = static_cast<float>(f8);
tensorstore::BFloat16 bf16 = static_cast<tensorstore::BFloat16>(f8);
::half_float::half f16 = static_cast<::half_float::half>(f8);
if (isnan(f8)) {
EXPECT_TRUE(std::isnan(f64));
EXPECT_TRUE(std::isnan(f32));
EXPECT_TRUE(tensorstore::isnan(bf16));
EXPECT_TRUE(::half_float::isnan(f16));
} else {
EXPECT_EQ(f64, f32);
EXPECT_EQ(f32, bf16);
EXPECT_EQ(bf16, f16);
}
}
}
TYPED_TEST(Float8Test, DownCasts) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
float x = static_cast<float>(Float8::FromRep(i));
Float8 f64 = static_cast<Float8>(static_cast<double>(x));
Float8 f32 = static_cast<Float8>(static_cast<float>(x));
Float8 bf16 = static_cast<Float8>(static_cast<tensorstore::BFloat16>(x));
Float8 f16 = static_cast<Float8>(static_cast<::half_float::half>(x));
if (std::isnan(x)) {
EXPECT_TRUE(isnan(f64));
EXPECT_TRUE(isnan(f32));
EXPECT_TRUE(isnan(bf16));
EXPECT_TRUE(isnan(f16));
} else {
EXPECT_EQ(f64.rep(), i) << i;
EXPECT_EQ(f32.rep(), i) << i;
EXPECT_EQ(bf16.rep(), i) << i;
EXPECT_EQ(f16.rep(), i) << i;
}
}
}
TYPED_TEST(Float8Test, ConvertFromWithSaturation) {
using Float8 = TypeParam;
Float8 upper =
Float8::template ConvertFrom<true, false>(
static_cast<float>(std::numeric_limits<Float8>::max()) * 2);
EXPECT_EQ(upper, std::numeric_limits<Float8>::max());
Float8 lower =
Float8::template ConvertFrom<true, false>(
static_cast<float>(std::numeric_limits<Float8>::lowest()) * 2);
EXPECT_EQ(lower, std::numeric_limits<Float8>::lowest());
Float8 nan =
Float8::template ConvertFrom<true, true>(
std::numeric_limits<float>::quiet_NaN());
EXPECT_TRUE(isnan(nan));
Float8 inf =
Float8::template ConvertFrom<true, true>(
std::numeric_limits<float>::infinity());
EXPECT_TRUE(std::numeric_limits<Float8>::has_infinity ? isinf(inf)
: isnan(inf));
Float8 ninf =
Float8::template ConvertFrom<true, true>(
-std::numeric_limits<float>::infinity());
EXPECT_TRUE(std::numeric_limits<Float8>::has_infinity ? isinf(ninf)
: isnan(ninf));
}
TYPED_TEST(Float8Test, ConvertFromWithTruncation) {
using Float8 = TypeParam;
float less_than_two = absl::bit_cast<float>(0x3FFFFFFF);
Float8 truncated =
Float8::template ConvertFrom<false, true>(
less_than_two);
EXPECT_LT(static_cast<float>(truncated), 2);
Float8 rounded =
Float8::template ConvertFrom<false, false>(
less_than_two);
EXPECT_EQ(static_cast<float>(rounded), 2);
double kLarge = 0x1.c001p+16;
EXPECT_EQ(
(Float8::template ConvertFrom<false, true>(
kLarge)
.rep()),
std::numeric_limits<Float8>::infinity().rep());
EXPECT_EQ(
(Float8::template ConvertFrom<false, false>(
kLarge)
.rep()),
std::numeric_limits<Float8>::infinity().rep());
for (int i = 0x01; i < 0x04; ++i) {
float less_than_subnorm =
std::nexttoward(static_cast<float>(Float8::FromRep(i)), 0);
Float8 truncated_subnorm =
Float8::template ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), i - 1);
Float8 rounded_subnorm =
Float8::template ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), i);
}
}
TYPED_TEST(Float8Test, ConvertTo) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 f8 = Float8::FromRep(i);
float f32 = static_cast<float>(f8);
if (isnan(f8)) {
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, false,
false>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, false,
true>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, true,
false>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, true,
true>(f8)));
} else {
EXPECT_EQ(f32, (Float8::template ConvertTo<float, false,
false>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, false,
true>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, true,
false>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, true,
true>(f8)));
}
}
}
TEST(Float8Test, Float8E5m2_To_Float8E4m3) {
Float8e5m2 max = std::numeric_limits<Float8e5m2>::max();
Float8e4m3fn saturated = Float8e4m3fn::ConvertFrom<true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3fn>::max());
saturated = Float8e5m2::ConvertTo<Float8e4m3fn, true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3fn>::max());
Float8e5m2 less_than_subnorm = Float8e5m2::FromRep(0x1F);
Float8e4m3fn rounded_subnorm =
Float8e4m3fn::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3fn truncated_subnorm =
Float8e4m3fn::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
}
// 0x1.dfcp+8 (= 479.75) exceeds e4m3fn's max of 448, so the saturating
// half -> e4m3fn conversion must clamp to max rather than produce NaN.
TEST(Float8Test, Half_To_Float8E4m3) {
  ::half_float::half big_half(0x1.dfcp+8f);
  Float8e4m3fn big_e4m3 =
      Float8e4m3fn::ConvertFrom<true, false>(
          big_half);
  EXPECT_EQ(big_e4m3.rep(), std::numeric_limits<Float8e4m3fn>::max().rep());
}
TEST(Float8Test, Float8E5m2_To_Float8E4m3b11fnuz) {
Float8e5m2 max = std::numeric_limits<Float8e5m2>::max();
Float8e4m3b11fnuz saturated =
Float8e4m3b11fnuz::ConvertFrom<true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3b11fnuz>::max());
saturated = Float8e5m2::ConvertTo<Float8e4m3b11fnuz, true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3b11fnuz>::max());
Float8e5m2 less_than_subnorm = Float8e5m2::FromRep(0x0F);
Float8e4m3b11fnuz rounded_subnorm =
Float8e4m3b11fnuz::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3b11fnuz truncated_subnorm =
Float8e4m3b11fnuz::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
for (uint8_t i = 0; i < std::numeric_limits<Float8e5m2>::infinity().rep();
++i) {
Float8e5m2 big_e5m2 = absl::bit_cast<Float8e5m2>(i);
EXPECT_TRUE(isfinite(big_e5m2)) << uint16_t{i};
float big_float = static_cast<float>(big_e5m2);
auto big_e4m3 =
Float8e4m3b11fnuz::ConvertFrom<true,
false>(big_float);
if (i > 0x4f) {
EXPECT_EQ(big_e4m3.rep(),
std::numeric_limits<Float8e4m3b11fnuz>::max().rep())
<< uint16_t{i};
}
EXPECT_EQ((Float8e4m3b11fnuz::ConvertFrom<true,
false>(big_e5m2)
.rep()),
big_e4m3.rep())
<< i;
EXPECT_EQ((Float8e4m3b11fnuz::ConvertFrom<true,
false>(-big_e5m2)
.rep()),
(-big_e4m3).rep())
<< i;
}
}
TEST(Float8Test, Float8E4m3b11fnuz_To_Float8E4m3) {
Float8e4m3b11fnuz max = std::numeric_limits<Float8e4m3b11fnuz>::max();
Float8e4m3fn saturated = Float8e4m3fn::ConvertFrom<true>(max);
EXPECT_EQ(static_cast<float>(saturated),
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()));
saturated =
Float8e4m3b11fnuz::ConvertTo<Float8e4m3fn, true>(max);
EXPECT_EQ(static_cast<float>(saturated),
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()));
Float8e4m3b11fnuz less_than_subnorm =
Float8e4m3b11fnuz::FromRep(0b0011'110);
Float8e4m3fn rounded_subnorm =
Float8e4m3fn::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3fn truncated_subnorm =
Float8e4m3fn::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
for (uint8_t i = 0;
i < std::numeric_limits<Float8e4m3b11fnuz>::infinity().rep(); ++i) {
Float8e4m3b11fnuz big_e4m3b11fnuz = absl::bit_cast<Float8e4m3b11fnuz>(i);
EXPECT_TRUE(isfinite(big_e4m3b11fnuz)) << uint16_t{i};
float big_float = static_cast<float>(big_e4m3b11fnuz);
auto big_e4m3 =
Float8e4m3fn::ConvertFrom<true, false>(
big_float);
EXPECT_EQ(
(Float8e4m3fn::ConvertFrom<true, false>(
big_e4m3b11fnuz)
.rep()),
big_e4m3.rep())
<< i;
EXPECT_EQ(
(Float8e4m3fn::ConvertFrom<true, false>(
-big_e4m3b11fnuz)
.rep()),
(big_float > 0.0f ? -big_e4m3 : big_e4m3).rep())
<< i;
}
}
// e4m3fn -> e5m2 drops one mantissa bit: a value just below 2 must
// truncate to something below 2 but round up to exactly 2.
TEST(Float8Test, Float8E4m3_To_Float8E5m2) {
  Float8e4m3fn less_than_two = Float8e4m3fn::FromRep(0x3F);
  Float8e5m2 truncated =
      Float8e5m2::template ConvertFrom<false,
                                       true>(less_than_two);
  EXPECT_LT(static_cast<float>(truncated), 2);
  Float8e5m2 rounded =
      Float8e5m2::template ConvertFrom<false,
                                       false>(less_than_two);
  EXPECT_EQ(static_cast<float>(rounded), 2);
}
TEST(Float8Test, Half_To_Float8E5m2) {
::half_float::half inf =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x7C00));
EXPECT_EQ(static_cast<Float8e5m2>(inf).rep(), 0x7C);
::half_float::half ninf =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0xFC00));
EXPECT_EQ(static_cast<Float8e5m2>(ninf).rep(), 0xFC);
::half_float::half nan =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x7C01));
EXPECT_EQ(static_cast<Float8e5m2>(nan).rep(), 0x7E);
::half_float::half nnan =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0xFC01));
EXPECT_EQ(static_cast<Float8e5m2>(nnan).rep(), 0xFE);
::half_float::half less_than_two =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x3FFF));
EXPECT_EQ((Float8e5m2::ConvertFrom<false,
false>(less_than_two)
.rep()),
0x40);
EXPECT_EQ((Float8e5m2::ConvertFrom<false,
true>(less_than_two)
.rep()),
0x3F);
EXPECT_EQ((Float8e5m2::ConvertFrom<false,
false>(-less_than_two)
.rep()),
0xC0);
EXPECT_EQ((Float8e5m2::ConvertFrom<false,
true>(-less_than_two)
.rep()),
0xBF);
for (uint16_t i = static_cast<uint16_t>(absl::bit_cast<uint8_t>(
std::numeric_limits<Float8e5m2>::max()))
<< 8;
i < absl::bit_cast<uint16_t>(
std::numeric_limits<::half_float::half>::infinity());
++i) {
::half_float::half big_half = absl::bit_cast<::half_float::half>(i);
float big_float = static_cast<float>(big_half);
EXPECT_EQ((Float8e5m2::ConvertFrom<true, false>(
big_half)
.rep()),
(Float8e5m2::ConvertFrom<true, false>(
big_float)
.rep()))
<< i;
EXPECT_EQ((Float8e5m2::ConvertFrom<true, false>(
-big_half)
.rep()),
(Float8e5m2::ConvertFrom<true, false>(
-big_float)
.rep()))
<< i;
}
}
using ::testing::Eq;
using ::testing::IsTrue;
// Matches when `arg == other`, except that any two NaNs compare as equal
// (a plain Eq matcher would fail because NaN != NaN).
MATCHER_P(EqOrIsNan, other, "") {
  if (isnan(other)) {
    return ExplainMatchResult(IsTrue(), isnan(arg), result_listener);
  }
  return ExplainMatchResult(Eq(other), arg, result_listener);
}
TYPED_TEST(Float8Test, CallTheOperator) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 a = Float8::FromRep(i);
for (int j = 0x00; j <= 0xFF; ++j) {
Float8 b = Float8::FromRep(j);
EXPECT_THAT(a + b, EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT(a - b, EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT(a * b, EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT(a / b, EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
Float8 c;
EXPECT_THAT((c = a, c += b),
EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT((c = a, c -= b),
EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT((c = a, c *= b),
EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT((c = a, c /= b),
EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
EXPECT_EQ(a == b, float{a} == float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a != b, float{a} != float{b});
EXPECT_EQ(a < b, float{a} < float{b});
EXPECT_EQ(a <= b, float{a} <= float{b});
EXPECT_EQ(a > b, float{a} > float{b});
EXPECT_EQ(a >= b, float{a} >= float{b});
}
}
}
TYPED_TEST(Float8Test, CallTheConstOperator) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
const Float8 a = Float8::FromRep(i);
for (int j = 0x00; j <= 0xFF; ++j) {
const Float8 b = Float8::FromRep(j);
EXPECT_THAT(a + b, EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT(a - b, EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT(a * b, EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT(a / b, EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
Float8 c;
EXPECT_THAT((c = a, c += b),
EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT((c = a, c -= b),
EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT((c = a, c *= b),
EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT((c = a, c /= b),
EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
EXPECT_EQ(a == b, float{a} == float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a != b, float{a} != float{b});
EXPECT_EQ(a < b, float{a} < float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a <= b, float{a} <= float{b});
EXPECT_EQ(a > b, float{a} > float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a >= b, float{a} >= float{b});
}
}
}
// A float in e5m2's subnormal range must round to the nearest representable
// subnormal (1.3125 * 2^-15 -> 1.5 * 2^-15), not be flushed to zero.
TEST(Float855m2Test, SmallCastToDenormal) {
  float x = std::ldexp(1.3125, -15);
  Float8e5m2 y = static_cast<Float8e5m2>(x);
  float z = static_cast<float>(y);
  EXPECT_EQ(z, std::ldexp(1.5, -15));
}
// Names each cast-test instantiation "<SourceType>_<DestType>".
struct Float8CastTestParamNames {
  template <typename TypeParam>
  static std::string GetName(int idx) {
    using first_type = typename TypeParam::first_type;
    using second_type = typename TypeParam::second_type;
    return absl::StrCat(::testing::internal::GetTypeName<first_type>(), "_",
                        ::testing::internal::GetTypeName<second_type>());
  }
};
// Expands to one (Type, long double) cast pair followed by a trailing comma.
#define GEN_LONG_DOUBLE_PAIR(Type) std::pair<Type, long double>,
// Expands to the full comma-separated list of destination-type pairs for a
// given float8 source type (floating point, reduced-precision, bool, and
// integer destinations).
#define GEN_DEST_TYPES(Type) \
  GEN_LONG_DOUBLE_PAIR(Type) \
  std::pair<Type, double>, std::pair<Type, float>, \
      std::pair<Type, tensorstore::BFloat16>, \
      std::pair<Type, ::half_float::half>, std::pair<Type, Float8e4m3fn>, \
      std::pair<Type, Float8e4m3b11fnuz>, std::pair<Type, Float8e4m3fnuz>, \
      std::pair<Type, Float8e5m2fnuz>, std::pair<Type, Float8e5m2>, \
      std::pair<Type, bool>, std::pair<Type, int32_t>, \
      std::pair<Type, int64_t>
// Cartesian product: every float8 flavor crossed with every destination type.
#define GEN_TYPE_PAIRS() \
  GEN_DEST_TYPES(Float8e4m3fn), GEN_DEST_TYPES(Float8e4m3b11fnuz), \
      GEN_DEST_TYPES(Float8e5m2), GEN_DEST_TYPES(Float8e4m3fnuz), \
      GEN_DEST_TYPES(Float8e5m2fnuz)
// The full typed-test parameter list (all source/destination combinations).
using Float8CastTypePairs = ::testing::Types<GEN_TYPE_PAIRS()>;
// Typed test fixture; each instantiation receives one (source, dest) pair.
template <typename CastPair>
class Float8CastTest : public ::testing::Test {};
TYPED_TEST_SUITE(Float8CastTest, Float8CastTypePairs, Float8CastTestParamNames);
// For every float8 bit pattern, a direct cast to the destination type must
// agree with first widening to `float` and then casting.
TYPED_TEST(Float8CastTest, CastThroughFloat) {
  using Float8 = typename TypeParam::first_type;
  using DestType = typename TypeParam::second_type;
  for (int i = 0x00; i <= 0xFF; ++i) {
    Float8 f8 = Float8::FromRep(i);
    // Casting NaN/inf to a (non-bool) integer type is undefined behavior,
    // so skip non-finite inputs for integer destinations.
    if constexpr (std::numeric_limits<DestType>::is_integer &&
                  !std::is_same_v<DestType, bool>) {
      if (!isfinite(f8)) {
        continue;
      }
    }
    DestType dest = static_cast<DestType>(f8);
    DestType expected = static_cast<DestType>(static_cast<float>(f8));
    if constexpr (std::numeric_limits<DestType>::is_integer) {
      EXPECT_EQ(dest, expected);
    } else {
      // Floating destinations may produce NaN on both sides.
      EXPECT_THAT(dest, EqOrIsNan<DestType>(expected));
    }
  }
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/float8.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/float8_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
43526799-73a2-492f-aa82-ab1df36fdb29 | cpp | google/tensorstore | result | tensorstore/serialization/result.h | tensorstore/serialization/result_test.cc | #ifndef TENSORSTORE_SERIALIZATION_RESULT_H_
#define TENSORSTORE_SERIALIZATION_RESULT_H_
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/status.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace serialization {
/// Serializer for `tensorstore::Result<T>`.
///
/// Wire format: a leading `bool` indicating success, followed by either the
/// serialized value (on success) or the serialized error status (on failure).
template <typename T>
struct Serializer<Result<T>> {
  /// Encodes `value.ok()` then the value or the error status.
  [[nodiscard]] static bool Encode(EncodeSink& sink, const Result<T>& value) {
    return serialization::Encode(sink, value.ok()) &&
           (value.ok() ? serialization::Encode(sink, *value)
                       : serialization::Encode(sink, value.status()));
  }
  /// Decodes the success flag, then decodes either a value in place (via
  /// `emplace()`) or an error status assigned into `value`.
  [[nodiscard]] static bool Decode(DecodeSource& source, Result<T>& value) {
    bool has_value;
    if (!serialization::Decode(source, has_value)) return false;
    if (has_value) {
      // Construct the contained value in place, then decode into it.
      return serialization::Decode(source, value.emplace());
    } else {
      absl::Status status;
      // ErrorStatusSerializer rejects an OK status here, since an OK status
      // would contradict the `has_value == false` flag.
      if (!ErrorStatusSerializer::Decode(source, status)) return false;
      value = std::move(status);
      return true;
    }
  }
};
}
}
#endif | #include "tensorstore/serialization/result.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
// A Result holding a value must survive an encode/decode round trip.
TEST(ResultTest, OkRoundTrip) {
  TestSerializationRoundTrip(tensorstore::Result<int>(3));
  TestSerializationRoundTrip(tensorstore::Result<int>(4));
}
// A Result holding an error status must also round trip losslessly.
TEST(StatusTest, ErrorRoundTrip) {
  TestSerializationRoundTrip(
      tensorstore::Result<int>(absl::InternalError("abc")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/result.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/result_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
18f87e2e-9b1a-444a-9ef9-eca87df828f3 | cpp | google/tensorstore | stop_token | tensorstore/util/stop_token.h | tensorstore/util/stop_token_test.cc | #ifndef TENSORSTORE_UTIL_STOP_TOKEN_H_
#define TENSORSTORE_UTIL_STOP_TOKEN_H_
#include <atomic>
#include <cstddef>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/stop_token_impl.h"
namespace tensorstore {
class StopSource;
template <typename Callback>
class StopCallback;
/// Handle for observing stop requests issued by an associated `StopSource`.
///
/// A default-constructed token has no associated stop state and can never
/// report a stop request.  Copy/move simply share or transfer the reference
/// to the underlying reference-counted stop state.
class StopToken {
 public:
  StopToken() noexcept = default;
  ~StopToken() noexcept = default;
  StopToken(const StopToken&) noexcept = default;
  StopToken(StopToken&&) noexcept = default;
  StopToken& operator=(const StopToken&) noexcept = default;
  StopToken& operator=(StopToken&&) noexcept = default;
  /// Returns true if this token is associated with a stop state (i.e. a stop
  /// could ever be requested through it).
  [[nodiscard]] bool stop_possible() const noexcept {
    return state_ != nullptr;
  }
  /// Returns true if a stop has been requested on the associated source.
  [[nodiscard]] bool stop_requested() const noexcept {
    return state_ != nullptr && state_->stop_requested();
  }
  /// Two tokens compare equal iff they share the same stop state.
  friend bool operator==(const StopToken& a, const StopToken& b) {
    return a.state_ == b.state_;
  }
  friend bool operator!=(const StopToken& a, const StopToken& b) {
    return !(a == b);
  }

 private:
  friend class StopSource;
  template <typename Callback>
  friend class StopCallback;
  // Constructed by StopSource::get_token with a shared stop state.
  StopToken(internal::IntrusivePtr<internal_stop_token::StopState> state)
      : state_(std::move(state)) {}
  internal::IntrusivePtr<internal_stop_token::StopState> state_{nullptr};
};
/// Owner side of a stop state: issues stop requests observed by `StopToken`s
/// and `StopCallback`s obtained from it.
class StopSource {
 public:
  /// Allocates a fresh stop state.
  StopSource() noexcept
      : state_(internal::MakeIntrusivePtr<internal_stop_token::StopState>()) {}
  /// Constructs a source with no stop state; `request_stop()` is a no-op and
  /// tokens obtained from it can never report a stop.
  explicit StopSource(std::nullptr_t) noexcept : state_(nullptr) {}
  ~StopSource() noexcept = default;
  StopSource(const StopSource& b) noexcept = default;
  StopSource(StopSource&&) noexcept = default;
  StopSource& operator=(const StopSource& b) noexcept = default;
  StopSource& operator=(StopSource&&) noexcept = default;
  /// True if this source owns a stop state.
  [[nodiscard]] bool stop_possible() const noexcept {
    return state_ != nullptr;
  }
  /// True if a stop has already been requested.
  [[nodiscard]] bool stop_requested() const noexcept {
    return state_ != nullptr && state_->stop_requested();
  }
  /// Requests a stop.  Returns the result of the underlying state's
  /// RequestStop (false when there is no state).
  bool request_stop() const noexcept {
    if (state_ != nullptr) {
      return state_->RequestStop();
    }
    return false;
  }
  /// Returns a token sharing this source's stop state (null state if the
  /// source was constructed with nullptr).
  [[nodiscard]] StopToken get_token() const noexcept {
    return StopToken(state_);
  }

 private:
  internal::IntrusivePtr<internal_stop_token::StopState> state_;
};
/// Registers `Callback` to be invoked when a stop is requested on the stop
/// state associated with the given token.
///
/// If the stop was already requested at construction time, registration in
/// the stop state triggers invocation (behavior provided by
/// `StopState::RegisterImpl` — see stop_token_impl).  The callback is
/// unregistered on destruction.  Non-copyable and non-movable: its address
/// is stored in the stop state's callback list.
template <typename Callback>
class StopCallback : private internal_stop_token::StopCallbackBase {
  static_assert(std::is_invocable_v<Callback>);

 public:
  using callback_type = Callback;
  StopCallback(const StopCallback&) = delete;
  StopCallback& operator=(const StopCallback&) = delete;
  StopCallback(StopCallback&&) = delete;
  StopCallback& operator=(StopCallback&&) = delete;
  /// Constructs the callback in place from `args` and registers it with the
  /// token's stop state (if any).  A token with no state registers nothing.
  template <
      typename... Args,
      std::enable_if_t<std::is_constructible_v<Callback, Args...>, int> = 0>
  explicit StopCallback(const StopToken& token, Args&&... args)
      : callback_(std::forward<Args>(args)...) {
    internal_stop_token::StopState* state = token.state_.get();
    if (state) {
      // invoker_ (inherited from StopCallbackBase) is the type-erased entry
      // point used by the stop state to run this callback.
      invoker_ = &StopCallback::Invoker;
      state->RegisterImpl(*this);
    }
  }
  ~StopCallback() {
    // Atomically claim the registration; state_ is inherited from
    // StopCallbackBase.  acq_rel ordering synchronizes with a concurrent
    // request_stop that may be invoking (or have invoked) the callback.
    internal_stop_token::StopState* state =
        state_.exchange(nullptr, std::memory_order_acq_rel);
    if (state != nullptr) {
      state->UnregisterImpl(*this);
    }
  }

 private:
  // Type-erased trampoline: downcasts from the base class stored in the stop
  // state's list and invokes the callback as an rvalue (so `operator()() &&`
  // overloads are selected).
  static void Invoker(internal_stop_token::StopCallbackBase& self) noexcept {
    static_cast<Callback&&>(static_cast<StopCallback&&>(self).callback_)();
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Callback callback_;
};
/// Deduction guide: `StopCallback cb(token, lambda)` deduces the callback
/// type from the second argument.
template <typename Callback>
StopCallback(StopToken token, Callback callback) -> StopCallback<Callback>;
}
#endif | #include "tensorstore/util/stop_token.h"
#include <functional>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/testing/concurrent.h"
namespace {
// A live source reports stop_possible; tokens share identity with their
// source's state; request_stop flips stop_requested on source and token.
TEST(StopTokenTest, Invariants) {
  tensorstore::StopSource source;
  EXPECT_TRUE(source.stop_possible());
  EXPECT_FALSE(source.stop_requested());
  tensorstore::StopToken token = source.get_token();
  EXPECT_TRUE(source.stop_possible());
  EXPECT_FALSE(source.stop_requested());
  EXPECT_EQ(token, source.get_token());
  EXPECT_TRUE(source.request_stop());
  EXPECT_TRUE(source.stop_possible());
  EXPECT_TRUE(source.stop_requested());
  EXPECT_TRUE(token.stop_requested());
  {
    tensorstore::StopSource source2;
    EXPECT_NE(token, source2.get_token());
  }
}
// A null source never reports stop_possible, and request_stop is a no-op.
TEST(StopTokenTest, Invariants_Null) {
  tensorstore::StopSource source(nullptr);
  EXPECT_FALSE(source.stop_possible());
  EXPECT_FALSE(source.stop_requested());
  tensorstore::StopToken token = source.get_token();
  EXPECT_FALSE(source.stop_possible());
  EXPECT_FALSE(source.stop_requested());
  EXPECT_EQ(token, source.get_token());
  EXPECT_FALSE(source.request_stop());
  EXPECT_FALSE(source.stop_possible());
  EXPECT_FALSE(source.stop_requested());
  EXPECT_FALSE(token.stop_requested());
  {
    tensorstore::StopSource source2;
    EXPECT_NE(token, source2.get_token());
  }
}
// A registered callback fires when request_stop happens while it is alive.
TEST(StopTokenTest, Basic_InScope) {
  tensorstore::StopSource source;
  bool called = false;
  {
    tensorstore::StopCallback callback(source.get_token(),
                                       [&]() { called = true; });
    EXPECT_FALSE(called);
    EXPECT_TRUE(source.request_stop());
  }
  EXPECT_TRUE(called);
}
// A callback destroyed before request_stop must not fire.
TEST(StopTokenTest, Basic_NotInScope) {
  tensorstore::StopSource source;
  bool called = false;
  {
    tensorstore::StopCallback callback(source.get_token(),
                                       [&]() { called = true; });
    EXPECT_FALSE(called);
  }
  EXPECT_TRUE(source.request_stop());
  EXPECT_FALSE(called);
}
// Callbacks registered on a null source are never invoked.
TEST(StopTokenTest, Basic_Null) {
  tensorstore::StopSource source(nullptr);
  bool called = false;
  {
    tensorstore::StopCallback callback(source.get_token(),
                                       [&]() { called = true; });
    EXPECT_FALSE(called);
    EXPECT_FALSE(source.request_stop());
  }
  EXPECT_FALSE(called);
}
// Registering after the stop was requested invokes the callback immediately.
TEST(StopTokenTest, StopAlreadyRequested) {
  tensorstore::StopSource source;
  EXPECT_TRUE(source.request_stop());
  bool called = false;
  tensorstore::StopCallback callback(source.get_token(),
                                     [&]() { called = true; });
  EXPECT_TRUE(called);
}
// Callbacks run in reverse registration order (2 before 1 before 0), with
// deregistered callbacks (x, tmp) excluded.
TEST(StopTokenTest, CallbackOrder) {
  bool called[3] = {};
  auto do_nothing = []() {};
  using DoNothingCallback = tensorstore::StopCallback<decltype(do_nothing)>;
  tensorstore::StopSource source;
  auto x = std::make_unique<DoNothingCallback>(source.get_token(), do_nothing);
  tensorstore::StopCallback callback0(source.get_token(), [&]() {
    EXPECT_TRUE(called[1]);
    called[0] = true;
  });
  tensorstore::StopCallback callback1(source.get_token(), [&]() {
    EXPECT_TRUE(called[2]);
    called[1] = true;
  });
  tensorstore::StopCallback callback2(source.get_token(), [&]() {
    EXPECT_FALSE(called[0]);
    called[2] = true;
  });
  { DoNothingCallback tmp(source.get_token(), do_nothing); }
  x = nullptr;
  EXPECT_TRUE(source.request_stop());
  EXPECT_TRUE(called[2]);
}
// StopCallback<Callback> invokes as rvalue (&& overload, +100),
// StopCallback<Callback&> and StopCallback<const Callback> invoke the
// const& overload (+1).
TEST(StopCallbackTest, InvokeValueCategory) {
  struct Callback {
    void operator()() const& { value += 1; }
    void operator()() && { value += 100; }
    int& value;
  };
  tensorstore::StopSource source;
  int counts[3] = {};
  tensorstore::StopCallback stop_callback0(source.get_token(),
                                           Callback{counts[0]});
  Callback callback1{counts[1]};
  tensorstore::StopCallback<Callback&> stop_callback1(source.get_token(),
                                                      callback1);
  tensorstore::StopCallback<const Callback> stop_callback2(source.get_token(),
                                                           Callback{counts[2]});
  source.request_stop();
  EXPECT_THAT(counts, ::testing::ElementsAre(100, 1, 1));
}
// A callback may destroy itself while being invoked.
TEST(StopTokenTest, SelfDeregister) {
  tensorstore::StopSource source;
  std::optional<tensorstore::StopCallback<std::function<void()>>> callback{
      std::in_place, source.get_token(), [&] { callback = std::nullopt; }};
  EXPECT_TRUE(source.request_stop());
  EXPECT_FALSE(callback.has_value());
}
// Stress-test concurrent register / request_stop / unregister against the
// same source, repeated 100 times with TestConcurrent interleavings.
TEST(StopTokenTest, Concurrent) {
  tensorstore::StopSource source;
  bool called = false;
  std::optional<tensorstore::StopCallback<std::function<void()>>> callback;
  ::tensorstore::internal_testing::TestConcurrent(
      100,
      [&] {
        tensorstore::StopSource new_source;
        source = std::move(new_source);
        called = false;
      },
      [&] {
        EXPECT_TRUE(source.stop_requested());
        callback = std::nullopt;
        EXPECT_TRUE(called);
      },
      [&] { callback.emplace(source.get_token(), [&]() { called = true; }); },
      [&] { source.request_stop(); },
      [&] {
        tensorstore::StopCallback callback(source.get_token(), []() {});
      }
  );
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ac09648e-4986-4aff-bf5f-113cdfb15936 | cpp | google/tensorstore | int4 | tensorstore/util/int4.h | tensorstore/util/int4_test.cc | #ifndef TENSORSTORE_UTIL_INT4_H_
#define TENSORSTORE_UTIL_INT4_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
class Int4Padded;
}
namespace std {
template <>
struct numeric_limits<::tensorstore::Int4Padded>;
}
namespace tensorstore {
namespace internal {
/// Sign-extends the low 4 bits of `x`, discarding the high nibble.
///
/// The low nibble is interpreted as a two's-complement 4-bit integer, so the
/// result is always in [-8, 7].
constexpr int8_t SignedTrunc4(int8_t x) {
  const int8_t low_nibble = static_cast<int8_t>(x & 0x0f);
  // Values 8..15 represent the negative range in 4-bit two's complement.
  return (low_nibble & 0x08) ? static_cast<int8_t>(low_nibble - 16)
                             : low_nibble;
}
}
class Int4Padded {
public:
constexpr Int4Padded() : rep_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_convertible_v<T, int8_t>>>
constexpr explicit Int4Padded(T x)
: rep_(internal::SignedTrunc4(static_cast<int8_t>(x))) {}
constexpr operator int8_t() const {
return internal::SignedTrunc4(rep_);
}
Int4Padded& operator=(bool v) { return *this = static_cast<Int4Padded>(v); }
template <typename T>
std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> operator=(
T v) {
return *this = static_cast<Int4Padded>(v);
}
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(OP) \
friend Int4Padded operator OP(Int4Padded a, Int4Padded b) { \
return Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(Int4Padded a, T b) { \
return Int4Padded(a.rep_ OP b); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(T a, Int4Padded b) { \
return Int4Padded(a OP b.rep_); \
} \
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(OP) \
friend Int4Padded& operator OP##=(Int4Padded& a, Int4Padded b) { \
return a = Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> \
operator OP##=(Int4Padded& a, T b) { \
return a = Int4Padded(a.rep_ OP b); \
} \
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(>>)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(>>)
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP
friend Int4Padded operator~(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(~a.rep_);
return result;
}
friend Int4Padded operator-(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(-a.rep_);
return result;
}
friend Int4Padded operator+(Int4Padded a) { return a; }
friend Int4Padded operator++(Int4Padded& a) {
a += Int4Padded(1);
return a;
}
friend Int4Padded operator--(Int4Padded& a) {
a -= Int4Padded(1);
return a;
}
friend Int4Padded operator++(Int4Padded& a, int) {
Int4Padded original_value = a;
++a;
return original_value;
}
friend Int4Padded operator--(Int4Padded& a, int) {
Int4Padded original_value = a;
--a;
return original_value;
}
template <template <typename U, typename V, typename... Args>
class ObjectType ,
template <typename U, typename... Args>
class ArrayType ,
class StringType , class BooleanType ,
class NumberIntegerType ,
class NumberUnsignedType ,
class NumberFloatType ,
template <typename U> class AllocatorType ,
template <typename T, typename SFINAE = void>
class JSONSerializer ,
class BinaryType >
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
Int4Padded v) {
j = static_cast<NumberIntegerType>(v);
}
constexpr friend bool operator==(const Int4Padded& a, const Int4Padded& b) {
return internal::SignedTrunc4(a.rep_) == internal::SignedTrunc4(b.rep_);
}
constexpr friend bool operator!=(const Int4Padded& a, const Int4Padded& b) {
return !(a == b);
}
struct bitcast_construct_t {};
explicit constexpr Int4Padded(bitcast_construct_t, int8_t rep) : rep_(rep) {}
int8_t rep_;
};
/// Absolute value, wrapping modulo 16 (so abs(Int4Padded(-8)) == -8, since
/// +8 is not representable in 4 bits).
inline Int4Padded abs(Int4Padded x) {
  const int8_t magnitude = static_cast<int8_t>(x.rep_ < 0 ? -x.rep_ : x.rep_);
  x.rep_ = internal::SignedTrunc4(magnitude);
  return x;
}
/// Raises `x` to the power `y` via floating-point `std::pow`, then truncates
/// the result back to 4 bits.
inline Int4Padded pow(Int4Padded x, Int4Padded y) {
  const int8_t base = static_cast<int8_t>(x);
  const int8_t exponent = static_cast<int8_t>(y);
  return Int4Padded(std::pow(base, exponent));
}
}
namespace std {
/// `std::numeric_limits` specialization describing Int4Padded as a bounded,
/// modulo-wrapping signed integer type with 3 value bits plus sign.
template <>
struct numeric_limits<tensorstore::Int4Padded> {
  static constexpr bool is_specialized = true;
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = true;
  static constexpr bool is_exact = true;
  static constexpr bool has_infinity = false;
  static constexpr bool has_quiet_NaN = false;
  static constexpr bool has_signaling_NaN = false;
  static constexpr bool is_bounded = true;
  // Arithmetic wraps modulo 16.
  static constexpr bool is_modulo = true;
  static constexpr int digits = 3;
  static constexpr int digits10 = 0;
  static constexpr int max_digits10 = 0;
  static constexpr int radix = 2;
  // Values are constructed via the bitcast tag to avoid truncation logic in
  // a constant expression.
  static constexpr tensorstore::Int4Padded min() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
  }
  static constexpr tensorstore::Int4Padded lowest() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
  }
  static constexpr tensorstore::Int4Padded max() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{7});
  }
};
}
#endif | #include "tensorstore/util/int4.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace {
using Int4 = tensorstore::Int4Padded;
// Reinterprets a raw byte as an Int4 without truncating the high nibble.
Int4 Bitcast(int8_t x) { return absl::bit_cast<Int4>(x); }
// Expected Int4 value for each int8 input (conversion wraps modulo 16).
constexpr std::pair<int8_t, Int4> kInt8ToInt4[] = {
    {-10, Int4(6)}, {-9, Int4(7)}, {-8, Int4(-8)}, {-7, Int4(-7)},
    {-6, Int4(-6)}, {-5, Int4(-5)}, {-4, Int4(-4)}, {-3, Int4(-3)},
    {-2, Int4(-2)}, {-1, Int4(-1)}, {0, Int4(0)}, {1, Int4(1)},
    {2, Int4(2)}, {3, Int4(3)}, {4, Int4(4)}, {5, Int4(5)},
    {6, Int4(6)}, {7, Int4(7)}, {8, Int4(-8)}, {9, Int4(-7)},
    {10, Int4(-6)},
};
// All 16 representable Int4 values paired with their int8 equivalents.
constexpr std::pair<Int4, int8_t> kInt4ToInt8[] = {
    {Int4(-8), -8}, {Int4(-7), -7}, {Int4(-6), -6}, {Int4(-5), -5},
    {Int4(-4), -4}, {Int4(-3), -3}, {Int4(-2), -2}, {Int4(-1), -1},
    {Int4(0), 0}, {Int4(1), 1}, {Int4(2), 2}, {Int4(3), 3},
    {Int4(4), 4}, {Int4(5), 5}, {Int4(6), 6}, {Int4(7), 7},
};
// int8 -> Int4 conversion wraps modulo 16.
TEST(Int4Test, Int8ToInt4) {
  for (const auto& [i8, i4] : kInt8ToInt4) {
    EXPECT_EQ(static_cast<Int4>(i8), i4);
  }
}
// Int4 -> int8 conversion is exact for all representable values.
TEST(Int4Test, Int4ToInt8) {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_EQ(static_cast<int8_t>(i4), i8);
  }
}
// Round-trip through an intermediate type X must be lossless for every
// representable Int4 value.  (The `i8` binding is unused here.)
template <typename X>
void TestInt4ToXToInt4() {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_EQ(static_cast<Int4>(static_cast<X>(i4)), i4);
  }
}
TEST(Int4Test, Int4ToInt32ToInt4) { TestInt4ToXToInt4<int32_t>(); }
TEST(Int4Test, Int4ToFloatToInt4) { TestInt4ToXToInt4<float>(); }
TEST(Int4Test, Int4ToDoubleToInt4) { TestInt4ToXToInt4<double>(); }
// Arithmetic wraps modulo 16 (e.g. 7 + 2 == -7).
TEST(Int4Test, Arithmetic) {
  EXPECT_EQ(Int4(1) + Int4(2), Int4(3));
  EXPECT_EQ(Int4(7) + Int4(2), Int4(-7));
  EXPECT_EQ(Int4(3) - Int4(5), Int4(-2));
  EXPECT_EQ(Int4(5) * Int4(-7), Int4(-3));
  EXPECT_EQ(Int4(-8) / Int4(3), Int4(-2));
  EXPECT_EQ(Int4(-7) % Int4(3), Int4(-1));
}
// Bitwise and/or/xor operate on the 4-bit patterns.
TEST(Int4Test, BitwiseBinary) {
  EXPECT_EQ(Int4(0b0110) & Int4(0b1011), Int4(0b0010));
  EXPECT_EQ(Int4(0b0110) | Int4(0b1011), Int4(0b1111));
  EXPECT_EQ(Int4(0b0110) ^ Int4(0b1011), Int4(0b1101));
}
// Complement inverts exactly the 4 significant bits.
TEST(Int4Test, BitwiseUnaryInverse) {
  EXPECT_EQ(~Int4(0b1011), Int4(0b0100));
  EXPECT_EQ(~Int4(0b0110), Int4(0b1001));
}
// Left shifts drop bits off the 4-bit top; right shifts are arithmetic
// (sign-extending), and over-shifting saturates to all-zero / all-one bits.
TEST(Int4Test, BitwiseShift) {
  EXPECT_EQ(Int4(0b0011) << Int4(0), Int4(0b0011));
  EXPECT_EQ(Int4(0b0011) << Int4(1), Int4(0b0110));
  EXPECT_EQ(Int4(0b0011) << Int4(2), Int4(0b1100));
  EXPECT_EQ(Int4(0b0011) << Int4(3), Int4(0b1000));
  EXPECT_EQ(Int4(0b0011) << Int4(4), Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << Int4(5), Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << int8_t{0}, Int4(0b0011));
  EXPECT_EQ(Int4(0b0011) << int8_t{1}, Int4(0b0110));
  EXPECT_EQ(Int4(0b0011) << int8_t{2}, Int4(0b1100));
  EXPECT_EQ(Int4(0b0011) << int8_t{3}, Int4(0b1000));
  EXPECT_EQ(Int4(0b0011) << int8_t{4}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << int8_t{5}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(0), Int4(0b0100));
  EXPECT_EQ(Int4(0b0100) >> Int4(1), Int4(0b0010));
  EXPECT_EQ(Int4(0b0100) >> Int4(2), Int4(0b0001));
  EXPECT_EQ(Int4(0b0100) >> Int4(3), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(4), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(5), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{0}, Int4(0b0100));
  EXPECT_EQ(Int4(0b0100) >> int8_t{1}, Int4(0b0010));
  EXPECT_EQ(Int4(0b0100) >> int8_t{2}, Int4(0b0001));
  EXPECT_EQ(Int4(0b0100) >> int8_t{3}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{4}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{5}, Int4(0b0000));
  EXPECT_EQ(Int4(0b1010) >> Int4(0), Int4(0b1010));
  EXPECT_EQ(Int4(0b1010) >> Int4(1), Int4(0b1101));
  EXPECT_EQ(Int4(0b1010) >> Int4(2), Int4(0b1110));
  EXPECT_EQ(Int4(0b1010) >> Int4(3), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> Int4(4), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> Int4(5), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{0}, Int4(0b1010));
  EXPECT_EQ(Int4(0b1010) >> int8_t{1}, Int4(0b1101));
  EXPECT_EQ(Int4(0b1010) >> int8_t{2}, Int4(0b1110));
  EXPECT_EQ(Int4(0b1010) >> int8_t{3}, Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{4}, Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{5}, Int4(0b1111));
}
// abs wraps: abs(-8) stays -8 since +8 is not representable.
TEST(Int4Test, Abs) {
  EXPECT_EQ(abs(Int4(7)), Int4(7));
  EXPECT_EQ(abs(Int4(0)), Int4(0));
  EXPECT_EQ(abs(Int4(-7)), Int4(7));
  EXPECT_EQ(abs(Int4(-8)), Int4(-8));
}
TEST(Int4Test, Pow) {
  EXPECT_EQ(pow(Int4(2), Int4(0)), Int4(1));
  EXPECT_EQ(pow(Int4(2), Int4(1)), Int4(2));
  EXPECT_EQ(pow(Int4(2), Int4(2)), Int4(4));
}
// kInt4ToInt8 is sorted ascending, so index order implies value order.
TEST(Int4Test, Comparison) {
  for (int i = 0; i <= 15; i++) {
    const Int4 a = kInt4ToInt8[i].first;
    EXPECT_EQ(a, a);
    EXPECT_LE(a, a);
    EXPECT_GE(a, a);
    for (int j = i + 1; j <= 15; j++) {
      const Int4 b = kInt4ToInt8[j].first;
      EXPECT_NE(a, b);
      EXPECT_LT(a, b);
      EXPECT_LE(a, b);
      EXPECT_GT(b, a);
      EXPECT_GE(b, a);
    }
  }
}
// Padding (high-nibble) bits must not affect equality.
TEST(Int4Test, EquivalentRepresentationsCompareEqual) {
  for (int low_nibble = 0; low_nibble <= 15; low_nibble++) {
    const Int4 answer = Int4(low_nibble);
    for (int high_nibble_a = 0; high_nibble_a <= 15; high_nibble_a++) {
      for (int high_nibble_b = 0; high_nibble_b <= 15; high_nibble_b++) {
        const int8_t a = low_nibble | (high_nibble_a << 4);
        const int8_t b = low_nibble | (high_nibble_b << 4);
        const Int4 a4 = Bitcast(a);
        const Int4 b4 = Bitcast(b);
        EXPECT_EQ(a4, answer);
        EXPECT_EQ(b4, answer);
        EXPECT_EQ(a4, b4);
      }
    }
  }
}
// Ordering comparisons must also ignore padding bits.
TEST(Int4Test, NonCanonicalRepresentationsCompareCorrectly) {
  EXPECT_LT(Bitcast(0xD3), Bitcast(0xE5));
  EXPECT_LE(Bitcast(0xD3), Bitcast(0xE5));
  EXPECT_GT(Bitcast(0x33), Bitcast(0x4A));
  EXPECT_GE(Bitcast(0x33), Bitcast(0x4A));
}
// to_json serializes each Int4 as its integer value.
TEST(Int4Test, JsonConversion) {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_THAT(::nlohmann::json(i4), tensorstore::MatchesJson(i8));
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/int4.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/int4_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
286832a6-e242-4090-a201-48cb5d2db23c | cpp | google/tensorstore | json_absl_flag | tensorstore/util/json_absl_flag.h | tensorstore/util/json_absl_flag_test.cc | #ifndef TENSORSTORE_UTIL_JSON_ABSL_FLAG_H_
#define TENSORSTORE_UTIL_JSON_ABSL_FLAG_H_
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/flags/marshalling.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
/// Wraps a JSON-bindable type `T` so it can be used as an Abseil flag.
///
/// The flag's textual representation is the JSON encoding of `value`
/// produced by tensorstore's JSON binding machinery.
template <typename T>
struct JsonAbslFlag {
  // The wrapped flag value.
  T value;
  JsonAbslFlag() = default;
  /// Forwards any arguments constructible into `T`.
  template <typename... U,
            typename = std::enable_if_t<std::is_constructible_v<T, U&&...>>>
  JsonAbslFlag(U&&... arg) : value(std::forward<U>(arg)...) {}
  /// Abseil flag hook: converts the value to its JSON text form.
  /// Returns "" if the value cannot be converted to JSON.
  friend std::string AbslUnparseFlag(const JsonAbslFlag& json_flag) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto j, internal_json_binding::ToJson(json_flag.value), "");
    if (j.is_discarded()) return {};
    return absl::UnparseFlag(j.dump());
  }
  /// Abseil flag hook: parses JSON text into the value.  An empty string
  /// resets the value to default-constructed.  On failure, writes a
  /// diagnostic to `*error` and leaves `out->value` unchanged.
  friend bool AbslParseFlag(std::string_view in, JsonAbslFlag* out,
                            std::string* error) {
    if (in.empty()) {
      out->value = {};
      return true;
    }
    // allow_exceptions=false: parse errors yield a discarded value.
    ::nlohmann::json j = ::nlohmann::json::parse(in, nullptr, false);
    if (j.is_discarded()) {
      *error = absl::StrFormat("Failed to parse JSON: '%s'", in);
      return false;
    }
    // Bind into a temporary so a failed bind does not clobber `out->value`.
    T new_value = {};
    absl::Status status = internal_json_binding::DefaultBinder<>(
        std::true_type{}, internal_json_binding::NoOptions{}, &new_value, &j);
    if (!status.ok()) {
      *error = absl::StrFormat("Failed to bind JSON: %s", status.message());
      return false;
    }
    out->value = std::move(new_value);
    return true;
  }
};
}
#endif | #include "tensorstore/util/json_absl_flag.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/spec.h"
namespace {
// A default int64 flag must round-trip through unparse/parse without error.
TEST(JsonAbslFlag, IntFlag) {
  tensorstore::JsonAbslFlag<int64_t> flag = {};
  std::string default_value = AbslUnparseFlag(flag);
  std::string error;
  EXPECT_TRUE(AbslParseFlag(default_value, &flag, &error));
  EXPECT_TRUE(error.empty());
}
// Same round trip for a default (empty) kvstore Spec flag.
TEST(JsonAbslFlag, KvStoreSpecFlag) {
  tensorstore::JsonAbslFlag<tensorstore::kvstore::Spec> flag = {};
  std::string default_value = AbslUnparseFlag(flag);
  std::string error;
  EXPECT_TRUE(AbslParseFlag(default_value, &flag, &error))
      << "value: " << default_value;
  EXPECT_TRUE(error.empty()) << error;
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/json_absl_flag.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/json_absl_flag_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
24bb0006-b05f-484b-99d7-4f82b5dc15ee | cpp | google/tensorstore | executor | tensorstore/util/executor.h | tensorstore/util/executor_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTOR_H_
#define TENSORSTORE_UTIL_EXECUTOR_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/any_invocable.h"
#include "absl/meta/type_traits.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
using ExecutorTask = absl::AnyInvocable<void() &&>;
using Executor = poly::Poly<0, true, void(ExecutorTask) const>;
/// Executor that runs each submitted task immediately on the calling thread.
class InlineExecutor {
 public:
  template <typename Func>
  void operator()(Func&& func) const {
    // Invoke synchronously, preserving the caller's value category.
    std::forward<Func>(func)();
  }
};
/// Function object that, when invoked, submits a call to `function` (with
/// the supplied arguments bound by value) to `executor`.
template <typename ExecutorType, typename FunctionType>
class ExecutorBoundFunction {
 public:
  using Executor = ExecutorType;
  using Function = FunctionType;
  /// Non-const overload: moves `function` into the bound task, so invoking a
  /// non-const ExecutorBoundFunction consumes the stored function.
  template <typename... T>
  std::enable_if_t<std::is_invocable_v<Function&, T...>>
  operator()(T&&... arg) {
    executor(std::bind(std::move(function), std::forward<T>(arg)...))
  }
  /// Const overload: copies `function` into the bound task.
  template <typename... T>
  std::enable_if_t<std::is_invocable_v<const Function&, T...>> operator()(
      T&&... arg) const {
    executor(std::bind(function, std::forward<T>(arg)...));
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Executor executor;
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Function function;
};
/// Returns a function object that invokes `function` via `executor`.
/// General case: wraps executor and function in an ExecutorBoundFunction.
template <typename Executor, typename Function>
std::enable_if_t<
    !std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>,
    ExecutorBoundFunction<absl::remove_cvref_t<Executor>,
                          absl::remove_cvref_t<Function>>>
WithExecutor(Executor&& executor, Function&& function) {
  return {std::forward<Executor>(executor), std::forward<Function>(function)};
}
/// Optimization for InlineExecutor: since it runs tasks synchronously, the
/// function is returned unchanged (no wrapper object is created).
template <typename Executor, typename Function>
std::enable_if_t<std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>,
                 Function&&>
WithExecutor(Executor&& executor, Function&& function) {
  return std::forward<Function>(function);
}
}
#endif | #include "tensorstore/util/executor.h"
#include <functional>
#include <memory>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::Executor;
using ::tensorstore::InlineExecutor;
using ::tensorstore::WithExecutor;
// InlineExecutor runs the task synchronously before returning.
TEST(InlineExecutorTest, Basic) {
  Executor executor = InlineExecutor{};
  bool invoked = false;
  executor([&] { invoked = true; });
  EXPECT_TRUE(invoked);
}
// The non-const operator() of the bound function selects the non-const
// call operator of the wrapped functor.
TEST(WithExecutorTest, NonConst) {
  InlineExecutor executor;
  bool invoked = false;
  struct Func {
    void operator()(bool* x) const = delete;
    void operator()(bool* x) { *x = true; }
  };
  auto with_executor = WithExecutor(executor, Func{});
  with_executor(&invoked);
  EXPECT_TRUE(invoked);
}
// A const bound function selects the const call operator.
TEST(WithExecutorTest, Const) {
  InlineExecutor executor;
  bool invoked = false;
  struct Func {
    void operator()(bool* x) const { *x = true; }
    void operator()(bool*) = delete;
  };
  const auto with_executor = WithExecutor(executor, Func{});
  with_executor(&invoked);
  EXPECT_TRUE(invoked);
}
// ExecutorTask is move-only capable (here via a bound unique_ptr).
TEST(ExecutorTest, MoveOnly) {
  Executor executor = InlineExecutor{};
  int value = 0;
  executor(std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; },
                     std::make_unique<int>(3)));
  EXPECT_EQ(3, value);
}
// WithExecutor also supports move-only callables.
TEST(WithExecutorTest, MoveOnly) {
  Executor executor = InlineExecutor{};
  int value = 0;
  auto with_executor = WithExecutor(
      executor,
      std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; },
                std::make_unique<int>(3)));
  with_executor();
  EXPECT_EQ(3, value);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |