Original Code File
stringlengths 196
31.9k
| Original Ground Truth
stringlengths 78
32k
| Code
stringlengths 15
27.3k
| Unit Test
stringlengths 0
30.4k
|
---|---|---|---|
#include "tsl/platform/coding.h"
#include "tsl/platform/byte_order.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/tstring.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace core {
// Writes `value` into buf[0..1] in little-endian byte order.
// On little-endian hosts the in-memory representation already matches,
// so a raw memcpy suffices; otherwise the bytes are emitted explicitly.
void EncodeFixed16(char* buf, uint16 value) {
  if (!port::kLittleEndian) {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
    return;
  }
  memcpy(buf, &value, sizeof(value));
}
// Writes `value` into buf[0..3] in little-endian byte order.
void EncodeFixed32(char* buf, uint32 value) {
  if (!port::kLittleEndian) {
    // Big-endian host: emit each byte explicitly, least-significant first.
    for (int i = 0; i < 4; ++i) {
      buf[i] = (value >> (8 * i)) & 0xff;
    }
    return;
  }
  memcpy(buf, &value, sizeof(value));
}
// Writes `value` into buf[0..7] in little-endian byte order.
void EncodeFixed64(char* buf, uint64 value) {
  if (!port::kLittleEndian) {
    // Big-endian host: emit each byte explicitly, least-significant first.
    for (int i = 0; i < 8; ++i) {
      buf[i] = (value >> (8 * i)) & 0xff;
    }
    return;
  }
  memcpy(buf, &value, sizeof(value));
}
// Appends the 2-byte little-endian encoding of `value` to `*dst`.
void PutFixed16(string* dst, uint16 value) {
  char encoded[sizeof(value)];
  EncodeFixed16(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Appends the 4-byte little-endian encoding of `value` to `*dst`.
void PutFixed32(string* dst, uint32 value) {
  char encoded[sizeof(value)];
  EncodeFixed32(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Appends the 8-byte little-endian encoding of `value` to `*dst`.
void PutFixed64(string* dst, uint64 value) {
  char encoded[sizeof(value)];
  EncodeFixed64(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Encodes `v` as a base-128 varint at `dst` (1-5 bytes, 7 payload bits per
// byte, least-significant group first, high bit set on continuation bytes).
// Returns a pointer one past the last byte written.  Produces byte-for-byte
// the same output as the unrolled branch-per-length formulation.
char* EncodeVarint32(char* dst, uint32 v) {
  unsigned char* out = reinterpret_cast<unsigned char*>(dst);
  while (v >= 0x80) {
    *(out++) = static_cast<unsigned char>(v) | 0x80;  // continuation byte
    v >>= 7;
  }
  *(out++) = static_cast<unsigned char>(v);  // final byte, high bit clear
  return reinterpret_cast<char*>(out);
}
// Appends the varint32 encoding of `v` (at most 5 bytes) to `*dst`.
void PutVarint32(string* dst, uint32 v) {
  char scratch[5];
  const char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}
// tstring overload: appends the varint32 encoding of `v` to `*dst`.
void PutVarint32(tstring* dst, uint32 v) {
  char scratch[5];
  const char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}
// Encodes `v` as a base-128 varint at `dst` (1-10 bytes).  Returns a pointer
// one past the last byte written.
char* EncodeVarint64(char* dst, uint64 v) {
  unsigned char* out = reinterpret_cast<unsigned char*>(dst);
  for (; v >= 128; v >>= 7) {
    // Low 7 bits plus the continuation flag.
    *(out++) = static_cast<unsigned char>((v & 127) | 128);
  }
  *(out++) = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(out);
}
// Appends the varint64 encoding of `v` (at most 10 bytes) to `*dst`.
void PutVarint64(string* dst, uint64 v) {
  char scratch[10];
  const char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}
// tstring overload: appends the varint64 encoding of `v` to `*dst`.
void PutVarint64(tstring* dst, uint64 v) {
  char scratch[10];
  const char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}
// Returns the number of bytes (1-10) the varint encoding of `v` occupies:
// one byte per 7-bit group, minimum one.
int VarintLength(uint64_t v) {
  int count = 1;
  for (; v >= 128; v >>= 7) {
    ++count;
  }
  return count;
}
const char* GetVarint32Ptr(const char* p, const char* limit, uint32* value) {
if (p < limit) {
uint32 result = *(reinterpret_cast<const unsigned char*>(p));
if ((result & 128) == 0) {
*value = result;
return p + 1;
}
}
return GetVarint32PtrFallback(p, limit, value);
}
const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32* value) {
uint32 result = 0;
for (uint32 shift = 0; shift <= 28 && p < limit; shift += 7) {
uint32 byte = *(reinterpret_cast<const unsigned char*>(p));
p++;
if (byte & 128) {
result |= ((byte & 127) << shift);
} else {
result |= (byte << shift);
*value = result;
return reinterpret_cast<const char*>(p);
}
}
return nullptr;
}
// Decodes a varint32 from the front of `*input`, advancing `*input` past the
// consumed bytes on success.  Returns false on malformed/truncated input.
bool GetVarint32(StringPiece* input, uint32* value) {
  const char* start = input->data();
  const char* end = start + input->size();
  const char* rest = GetVarint32Ptr(start, end, value);
  if (rest == nullptr) return false;
  *input = StringPiece(rest, end - rest);
  return true;
}
const char* GetVarint64Ptr(const char* p, const char* limit, uint64* value) {
uint64 result = 0;
for (uint32 shift = 0; shift <= 63 && p < limit; shift += 7) {
uint64 byte = *(reinterpret_cast<const unsigned char*>(p));
p++;
if (byte & 128) {
result |= ((byte & 127) << shift);
} else {
result |= (byte << shift);
*value = result;
return reinterpret_cast<const char*>(p);
}
}
return nullptr;
}
// Decodes a varint64 from the front of `*input`, advancing `*input` past the
// consumed bytes on success.  Returns false on malformed/truncated input.
bool GetVarint64(StringPiece* input, uint64* value) {
  const char* start = input->data();
  const char* end = start + input->size();
  const char* rest = GetVarint64Ptr(start, end, value);
  if (rest == nullptr) return false;
  *input = StringPiece(rest, end - rest);
  return true;
}
}
} | #include "tensorflow/core/lib/core/coding.h"
#include <vector>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
// Round-trips every uint16 value below N through EncodeFixed16/DecodeFixed16.
TEST(Coding, Fixed16) {
  static const uint16 N = 50000;
  string s;
  for (uint16 v = 0; v < N; v++) {
    char buf[sizeof(uint16)];
    EncodeFixed16(buf, v);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (uint16 v = 0; v < N; v++) {
    uint16 actual = DecodeFixed16(p);
    ASSERT_EQ(v, actual);
    p += sizeof(uint16);
  }
}
// Round-trips the first N uint32 values through EncodeFixed32/DecodeFixed32.
TEST(Coding, Fixed32) {
  static const uint32 N = 100000;
  string s;
  for (uint32 v = 0; v < N; v++) {
    char buf[sizeof(uint32)];
    EncodeFixed32(buf, v);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (uint32 v = 0; v < N; v++) {
    uint32 actual = DecodeFixed32(p);
    ASSERT_EQ(v, actual);
    p += sizeof(uint32);
  }
}
// Round-trips values clustered around every power of two (2^k - 1, 2^k,
// 2^k + 1) through EncodeFixed64/DecodeFixed64 to hit all byte boundaries.
TEST(Coding, Fixed64) {
  string s;
  for (int power = 0; power <= 63; power++) {
    uint64 v = static_cast<uint64>(1) << power;
    char buf[sizeof(uint64)];
    EncodeFixed64(buf, v - 1);
    s.append(buf, sizeof(buf));
    EncodeFixed64(buf, v + 0);
    s.append(buf, sizeof(buf));
    EncodeFixed64(buf, v + 1);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (int power = 0; power <= 63; power++) {
    uint64 v = static_cast<uint64>(1) << power;
    uint64 actual;
    actual = DecodeFixed64(p);
    ASSERT_EQ(v - 1, actual);
    p += sizeof(uint64);
    actual = DecodeFixed64(p);
    ASSERT_EQ(v + 0, actual);
    p += sizeof(uint64);
    actual = DecodeFixed64(p);
    ASSERT_EQ(v + 1, actual);
    p += sizeof(uint64);
  }
}
// Pins the exact little-endian byte layout produced by the fixed encoders,
// independent of host endianness.
TEST(Coding, EncodingOutput) {
  char dst[8];
  EncodeFixed16(dst, 0x0201);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  EncodeFixed32(dst, 0x04030201);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
  EncodeFixed64(dst, 0x0807060504030201ull);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
}
// Round-trips (i/32) << (i%32) for i in [0, 1024) through
// PutVarint32/GetVarint32Ptr, covering every shift amount.
TEST(Coding, Varint32) {
  string s;
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 v = (i / 32) << (i % 32);
    PutVarint32(&s, v);
  }
  const char* p = s.data();
  const char* limit = p + s.size();
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 expected = (i / 32) << (i % 32);
    uint32 actual;
    p = GetVarint32Ptr(p, limit, &actual);
    ASSERT_TRUE(p != nullptr);
    ASSERT_EQ(expected, actual);
  }
  ASSERT_EQ(p, s.data() + s.size());
}
// Round-trips boundary-heavy uint64 values (0, max, and 2^k +/- 1) through
// PutVarint64/GetVarint64Ptr.
TEST(Coding, Varint64) {
  std::vector<uint64> values;
  values.push_back(0);
  values.push_back(100);
  values.push_back(~static_cast<uint64>(0));
  values.push_back(~static_cast<uint64>(0) - 1);
  for (uint32 k = 0; k < 64; k++) {
    const uint64 power = 1ull << k;
    values.push_back(power);
    values.push_back(power - 1);
    values.push_back(power + 1);
  }
  string s;
  for (size_t i = 0; i < values.size(); i++) {
    PutVarint64(&s, values[i]);
  }
  const char* p = s.data();
  const char* limit = p + s.size();
  for (size_t i = 0; i < values.size(); i++) {
    ASSERT_TRUE(p < limit);
    uint64 actual;
    p = GetVarint64Ptr(p, limit, &actual);
    ASSERT_TRUE(p != nullptr);
    ASSERT_EQ(values[i], actual);
  }
  ASSERT_EQ(p, limit);
}
// A 6-byte varint32 (5 continuation bytes) must be rejected.
TEST(Coding, Varint32Overflow) {
  uint32 result;
  string input("\x81\x82\x83\x84\x85\x11");
  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}
// Every strict prefix of a maximal varint32 must fail; the full encoding
// must decode back to the original value.
TEST(Coding, Varint32Truncation) {
  uint32 large_value = (1u << 31) + 100;
  string s;
  PutVarint32(&s, large_value);
  uint32 result;
  for (size_t len = 0; len < s.size() - 1; len++) {
    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
              nullptr);
  ASSERT_EQ(large_value, result);
}
// An 11-byte varint64 (10 continuation bytes) must be rejected.
TEST(Coding, Varint64Overflow) {
  uint64 result;
  string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}
// Every strict prefix of a maximal varint64 must fail; the full encoding
// must decode back to the original value.
TEST(Coding, Varint64Truncation) {
  uint64 large_value = (1ull << 63) + 100ull;
  string s;
  PutVarint64(&s, large_value);
  uint64 result;
  for (size_t len = 0; len < s.size() - 1; len++) {
    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
              nullptr);
  ASSERT_EQ(large_value, result);
}
}
} | void PutVarint32(string* dst, uint32 v) {
char buf[5];
char* ptr = EncodeVarint32(buf, v);
dst->append(buf, ptr - buf);
} | TEST(Coding, Varint32) {
string s;
for (uint32 i = 0; i < (32 * 32); i++) {
uint32 v = (i / 32) << (i % 32);
PutVarint32(&s, v);
}
const char* p = s.data();
const char* limit = p + s.size();
for (uint32 i = 0; i < (32 * 32); i++) {
uint32 expected = (i / 32) << (i % 32);
uint32 actual;
p = GetVarint32Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
}
ASSERT_EQ(p, s.data() + s.size());
} |
#include "tensorstore/index_space/internal/translate_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Rewrites the output index maps of `transform` to compensate for shifting
// each input dimension `i` by `input_offsets[i]`, so that the composed
// output indices are unchanged after the input domain is translated.
//
// \param transform Non-null transform representation to mutate in place.
// \param input_offsets Array of length `transform->input_rank` giving the
//     per-input-dimension shift already applied to the input domain.
// \returns OkStatus, or InvalidArgumentError on integer overflow while
//     adjusting a single_input_dimension offset.
absl::Status TranslateOutputOffsetsUsingInputOffsets(
    TransformRep* transform, const Index* input_offsets) {
  const DimensionIndex output_rank = transform->output_rank;
  const DimensionIndex input_rank = transform->input_rank;
  span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::single_input_dimension: {
        const DimensionIndex input_dim = map.input_dimension();
        const Index offset_change = input_offsets[input_dim];
        Index new_offset;
        // new_offset = stride * offset_change, then
        // map.offset -= new_offset; both steps are overflow-checked.
        if (internal::MulOverflow(offset_change, map.stride(), &new_offset) ||
            internal::SubOverflow(map.offset(), new_offset, &map.offset())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing output offset for dimension ",
              output_dim, "."));
        }
        break;
      }
      case OutputIndexMethod::array: {
        // Rebase the index array's element pointer by the negated inner
        // product of byte strides and offsets, so indexing with the shifted
        // domain reaches the same elements as before.
        auto& index_array_data = map.index_array_data();
        index_array_data.element_pointer = AddByteOffset(
            std::move(index_array_data.element_pointer),
            -IndexInnerProduct(input_rank, index_array_data.byte_strides,
                               input_offsets));
        break;
      }
      case OutputIndexMethod::constant:
        // Constant maps do not depend on input positions; nothing to adjust.
        break;
    }
  }
  return absl::OkStatus();
}
}
// Applies a translation (TranslateTo / TranslateBy / TranslateBackwardBy)
// to the input dimensions listed in `*dimensions` of `transform`.
//
// \param transform The transform to translate (consumed).
// \param dimensions Selected input dimension indices.
// \param offsets Per-dimension offset vector or broadcast scalar; an entry
//     equal to `kImplicit` leaves that dimension unchanged.
// \param kind Interpretation of `offsets` (absolute new origin vs. shift).
// \param domain_only If true, only the input domain representation is kept.
// \returns The translated transform, or an error on size mismatch or
//     interval/offset overflow.
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        IndexVectorOrScalarView offsets,
                                        TranslateOpKind kind,
                                        bool domain_only) {
  const DimensionIndex num_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
  // Obtain a uniquely-owned representation we are free to mutate.
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  const auto input_domain = rep->input_domain(input_rank);
  // Effective per-input-dimension shift; unselected dimensions stay 0.
  Index input_offsets[kMaxRank];
  std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    const DimensionIndex input_dim = (*dimensions)[i];
    Index offset = offsets[i];
    if (offset == kImplicit) continue;
    const IndexInterval old_interval = input_domain[input_dim];
    IndexInterval new_interval;
    switch (kind) {
      case TranslateOpKind::kTranslateTo: {
        // `offset` is an absolute new origin; convert it to a relative
        // shift after computing the shifted interval.
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftIntervalTo(old_interval, offset));
        offset = new_interval.inclusive_min() - old_interval.inclusive_min();
        break;
      }
      case TranslateOpKind::kTranslateBackwardBy: {
        // Backward translation is a forward translation by the negation;
        // fall through to the kTranslateBy handling.
        offset = -offset;
      }
        [[fallthrough]];
      case TranslateOpKind::kTranslateBy: {
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftInterval(old_interval, offset));
        break;
      }
    }
    input_domain[input_dim] = new_interval;
    input_offsets[input_dim] = offset;
  }
  // Compensate the output maps so composed output indices are unchanged.
  TENSORSTORE_RETURN_IF_ERROR(
      TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
// TranslateBy shifts the selected input dimensions forward by the given
// amounts; checks both index-based and label-based dimension selection.
TEST(TranslateByTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({11, 2, 23})
          .input_shape({3, 4, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, -10, 1, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, -20, 1, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 3}, {12, 3, 23}},
  };
  TestDimExpression(original_transform,
                    Dims(0, 2).TranslateBy({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  // Same expectation when dimensions are selected by label.
  TestDimExpression(original_transform,
                    Dims("x", "z").TranslateBy({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// TranslateBackwardBy shifts the selected input dimensions backward,
// i.e. origins decrease and output offsets increase correspondingly.
TEST(TranslateBackwardByTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({-9, 2, -17})
          .input_shape({3, 4, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, 10, 1, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 20, 1, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 3}, {-8, 3, -17}},
  };
  TestDimExpression(original_transform,
                    Dims(0, 2).TranslateBackwardBy({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  // Same expectation when dimensions are selected by label.
  TestDimExpression(original_transform,
                    Dims("x", "z").TranslateBackwardBy({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// TranslateTo moves the selected input dimensions to the given absolute
// origins; checks both index-based and label-based dimension selection.
TEST(TranslateToTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({10, 2, 20})
          .input_shape({3, 4, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, -9, 1, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, -17, 1, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 3}, {11, 3, 20}},
  };
  TestDimExpression(original_transform,
                    Dims(0, 2).TranslateTo({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  // Fix: the second invocation previously repeated `Dims(0, 2)` verbatim,
  // adding no coverage.  Select by label instead, mirroring the
  // TranslateByTest/TranslateBackwardByTest Example tests, so the
  // label-based selection path is also exercised.
  TestDimExpression(original_transform,
                    Dims("x", "z").TranslateTo({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// A constant output map is unaffected by translation; only the domain and
// the domain-only identity view shift.
TEST(TranslateByTest, OneDimensionalConstant) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_constant(0, 2)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_constant(0, 2)
          .Finalize()
          .value(),
      {{{4}, {9}}});
}
// For a single_input_dimension map, translating by 5 changes the output
// offset from 2 to 2 - 3*5 (stride 3).
TEST(TranslateByTest, OneDimensionalSingleInputDimension) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2 - 3 * 5, 3, 0)
          .Finalize()
          .value(),
      {{{4}, {9}}});
}
// An offset of kImplicit leaves the dimension (and the transform) unchanged.
TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      AllDims().TranslateBy(kImplicit),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      {{{4}, {4}}});
}
// An index-array map keeps the same array contents; the domain shifts and
// the array is implicitly rebased.
TEST(TranslateByTest, OneDimensionalIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({5})
          .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({3})
          .input_shape({5})
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({3})
          .input_shape({5})
          .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
          .Finalize()
          .value(),
      {{{1}, {6}}});
}
// A scalar offset applied to AllDims shifts every dimension uniformly,
// including unbounded (infinite) dimensions.
TEST(TranslateByTest, AllDimsUniform) {
  TestDimExpression(
      IndexTransformBuilder<3, 5>()
          .input_origin({-kInfIndex, 5, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 10})
          .output_single_input_dimension(0, 1, 4, 0)
          .output_single_input_dimension(1, 2, 5, 0)
          .output_constant(2, 3)
          .output_single_input_dimension(3, 4, 7, 1)
          .output_single_input_dimension(4, 5, 8, 2)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0, 1, 2},
      IndexTransformBuilder<3, 3>()
          .input_origin({-kInfIndex, 10, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 15})
          .output_single_input_dimension(0, -5, 1, 0)
          .output_single_input_dimension(1, -5, 1, 1)
          .output_single_input_dimension(2, -5, 1, 2)
          .Finalize()
          .value(),
      IndexTransformBuilder<3, 5>()
          .input_origin({-kInfIndex, 10, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 15})
          .output_single_input_dimension(0, 1 - 4 * 5, 4, 0)
          .output_single_input_dimension(1, 2 - 5 * 5, 5, 0)
          .output_constant(2, 3)
          .output_single_input_dimension(3, 4 - 7 * 5, 7, 1)
          .output_single_input_dimension(4, 5 - 8 * 5, 8, 2)
          .Finalize()
          .value(),
      {{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}});
}
// Error cases: offset-count mismatch, shifts that push the origin outside
// the finite index range, and output-offset integer overflow.
TEST(TranslateByTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>().Finalize().value(),
      AllDims().TranslateBy(span<const Index>({1, 2})),
      absl::StatusCode::kInvalidArgument,
      "Number of dimensions \\(1\\) does not match number of "
      "indices \\(2\\)");
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({kMinFiniteIndex})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         AllDims().TranslateBy(-kInfIndex),
                         absl::StatusCode::kInvalidArgument,
                         ".* is outside valid range .*"),
// Translating a subset of dimensions by a single scalar leaves the
// unselected dimension untouched.
TEST(TranslateByTest, DimSubsetUniform) {
  TestDimExpression(IndexTransformBuilder<3, 2>()
                        .input_origin({1, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2, 2, 2)
                        .Finalize()
                        .value(),
                    Dims(0, 2).TranslateBy(5),
                    {0, 2},
                    IndexTransformBuilder<3, 3>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 5})
                        .output_single_input_dimension(0, -5, 1, 0)
                        .output_single_input_dimension(1, 1)
                        .output_single_input_dimension(2, -5, 1, 2)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<3, 2>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 5})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2 - 2 * 5, 2, 2)
                        .Finalize()
                        .value(),
                    {{{4, 5, 6}, {4 + 5, 5, 6 + 5}}});
}
// Translating a subset of dimensions by distinct offsets {5, 6}.
TEST(TranslateByTest, DimSubsetNonUniform) {
  TestDimExpression(IndexTransformBuilder<3, 2>()
                        .input_origin({1, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2, 2, 2)
                        .Finalize()
                        .value(),
                    Dims(0, 2).TranslateBy({5, 6}),
                    {0, 2},
                    IndexTransformBuilder<3, 3>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 6})
                        .output_single_input_dimension(0, -5, 1, 0)
                        .output_single_input_dimension(1, 1)
                        .output_single_input_dimension(2, -6, 1, 2)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<3, 2>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 6})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2 - 2 * 6, 2, 2)
                        .Finalize()
                        .value(),
                    {{{3, 4, 5}, {3 + 5, 4, 5 + 6}}});
}
// TranslateTo(8) moves the origin 5 -> 8; constant output map is unchanged.
TEST(TranslateToTest, OneDimensionalConstant) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_constant(0, 2)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(8),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({8})
                        .input_shape({10})
                        .output_single_input_dimension(0, -3, 1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({8})
                        .input_shape({10})
                        .output_constant(0, 2)
                        .Finalize()
                        .value(),
                    {{{7}, {10}}});
}
// TranslateTo(5) moves origin 4 -> 5 (shift 1); output offset drops by
// stride * shift = 3.
TEST(TranslateToTest, OneDimensionalSingleInputDimension) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(5),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_single_input_dimension(0, -1, 1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2 - 3, 3, 0)
                        .Finalize()
                        .value(),
                    {{{6}, {7}}});
}
// TranslateTo(kImplicit) leaves the dimension unchanged.
TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(kImplicit),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    {{{6}, {6}}});
}
// Mixed per-dimension targets: dim 0 kept implicit, dim 1 moved to 10.
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
  TestDimExpression(IndexTransformBuilder<2, 2>()
                        .input_origin({4, 5})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, 4, 5, 1)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo({kImplicit, 10}),
                    {0, 1},
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 0)
                        .output_single_input_dimension(1, -5, 1, 1)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, -25 + 4, 5, 1)
                        .Finalize()
                        .value(),
                    {{{6, 7}, {6, 12}}});
}
// Error cases: TranslateTo on an unbounded domain, and a target origin
// that would push the interval outside the finite index range.
TEST(TranslateToTest, ErrorHandling) {
  TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
                         AllDims().TranslateTo(1),
                         absl::StatusCode::kInvalidArgument,
                         "Interval \\(-inf, \\+inf\\) is not bounded below");
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({-5})
          .input_shape({10})
          .Finalize()
          .value(),
      AllDims().TranslateTo(std::numeric_limits<Index>::max()),
      absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*");
}
// TranslateTo also applies directly to an IndexDomain via operator|.
TEST(TranslateToTest, IndexDomain) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto domain,
      IndexDomainBuilder<3>().origin({1, 2, 3}).shape({6, 7, 8}).Finalize());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto translated_domain,
      IndexDomainBuilder<3>().origin({4, 5, 6}).shape({6, 7, 8}).Finalize());
  EXPECT_THAT(domain | AllDims().TranslateTo({4, 5, 6}),
              ::testing::Optional(translated_domain));
}
// Translating the full transform overflows the output offset and fails,
// but translating only the domain (no output maps) succeeds.
TEST(TranslateToTest, IndexDomainOverflow) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      IndexTransformBuilder(1, 1)
          .input_shape({10})
          .output_single_input_dimension(0, kMaxFiniteIndex, kMaxFiniteIndex, 0)
          .Finalize());
  auto domain = transform.domain();
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto translated_domain,
      IndexDomainBuilder(1).origin({-5}).shape({10}).Finalize());
  EXPECT_THAT(transform | AllDims().TranslateTo({-5}),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(domain | AllDims().TranslateTo({-5}),
              ::testing::Optional(translated_domain));
}
} | Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView offsets,
TranslateOpKind kind,
bool domain_only) {
const DimensionIndex num_dims = dimensions->size();
const DimensionIndex input_rank = transform.input_rank();
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
const auto input_domain = rep->input_domain(input_rank);
Index input_offsets[kMaxRank];
std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
Index offset = offsets[i];
if (offset == kImplicit) continue;
const IndexInterval old_interval = input_domain[input_dim];
IndexInterval new_interval;
switch (kind) {
case TranslateOpKind::kTranslateTo: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftIntervalTo(old_interval, offset));
offset = new_interval.inclusive_min() - old_interval.inclusive_min();
break;
}
case TranslateOpKind::kTranslateBackwardBy: {
offset = -offset;
}
[[fallthrough]];
case TranslateOpKind::kTranslateBy: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftInterval(old_interval, offset));
break;
}
}
input_domain[input_dim] = new_interval;
input_offsets[input_dim] = offset;
}
TENSORSTORE_RETURN_IF_ERROR(
TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
} | TEST(TranslateByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({11, 2, 23})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {12, 3, 23}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateBackwardByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({-9, 2, -17})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {-8, 3, -17}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
// TranslateTo moves the selected dimensions' origins to absolute positions
// (x -> 10, z -> 20) and compensates with negative output offsets so the
// composed mapping to output coordinates is unchanged.  Verifies both
// index-based and label-based dimension selection.
TEST(TranslateToTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform =
      IndexTransformBuilder<3, 3>()
          .input_origin({10, 2, 20})
          .input_shape({3, 4, 2})
          .input_labels({"x", "y", "z"})
          .output_single_input_dimension(0, -9, 1, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, -17, 1, 2)
          .Finalize()
          .value();
  const EquivalentIndices equivalent_indices = {
      {{2, 3, 3}, {11, 3, 20}},
  };
  // Select dimensions by index.
  TestDimExpression(original_transform,
                    Dims(0, 2).TranslateTo({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  // Select the same dimensions by label.  This previously repeated the
  // index-based call verbatim (a copy-paste slip relative to the parallel
  // TranslateBy/TranslateBackwardBy Example tests), leaving the label-based
  // selection path untested.
  TestDimExpression(original_transform,
                    Dims("x", "z").TranslateTo({10, 20}),
                    {0, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// A constant output map is unaffected by translation; only the domain (and
// the first "new transform" view, which maps new coords back by -5) changes.
TEST(TranslateByTest, OneDimensionalConstant) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_constant(0, 2)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_constant(0, 2)
          .Finalize()
          .value(),
      {{{4}, {9}}});
}
// Translating by 5 folds into the output map's offset: 2 + 3*x becomes
// (2 - 3*5) + 3*x' with x' = x + 5, so output values are preserved.
TEST(TranslateByTest, OneDimensionalSingleInputDimension) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2 - 3 * 5, 3, 0)
          .Finalize()
          .value(),
      {{{4}, {9}}});
}
// A kImplicit translation offset leaves the dimension untouched: the
// resulting transform is the identity on the selection and the original
// output map is preserved verbatim.
TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      AllDims().TranslateBy(kImplicit),
      {0},
      IndexTransformBuilder<1, 1>()
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .output_single_input_dimension(0, 2, 3, 0)
          .Finalize()
          .value(),
      {{{4}, {4}}});
}
// For an index-array output map, translation shifts the domain
// ([-2,2] -> [3,7]) but the array itself is unchanged; new index 1 maps to
// old index -4... rather, to array position (1 - 5) - (-2) = element 6.
TEST(TranslateByTest, OneDimensionalIndexArray) {
  TestDimExpression(
      IndexTransformBuilder<1, 1>()
          .input_origin({-2})
          .input_shape({5})
          .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0},
      IndexTransformBuilder<1, 1>()
          .input_origin({3})
          .input_shape({5})
          .output_single_input_dimension(0, -5, 1, 0)
          .Finalize()
          .value(),
      IndexTransformBuilder<1, 1>()
          .input_origin({3})
          .input_shape({5})
          .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
          .Finalize()
          .value(),
      {{{1}, {6}}});
}
// A uniform translation of all dimensions, including infinite bounds
// (kInfIndex endpoints stay infinite) and multiple output maps referencing
// the same input dimension; every single_input_dimension offset absorbs
// -stride*5 while constants are untouched.
TEST(TranslateByTest, AllDimsUniform) {
  TestDimExpression(
      IndexTransformBuilder<3, 5>()
          .input_origin({-kInfIndex, 5, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 10})
          .output_single_input_dimension(0, 1, 4, 0)
          .output_single_input_dimension(1, 2, 5, 0)
          .output_constant(2, 3)
          .output_single_input_dimension(3, 4, 7, 1)
          .output_single_input_dimension(4, 5, 8, 2)
          .Finalize()
          .value(),
      AllDims().TranslateBy(5),
      {0, 1, 2},
      IndexTransformBuilder<3, 3>()
          .input_origin({-kInfIndex, 10, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 15})
          .output_single_input_dimension(0, -5, 1, 0)
          .output_single_input_dimension(1, -5, 1, 1)
          .output_single_input_dimension(2, -5, 1, 2)
          .Finalize()
          .value(),
      IndexTransformBuilder<3, 5>()
          .input_origin({-kInfIndex, 10, -kInfIndex})
          .input_shape({kInfSize, 30, kInfIndex + 15})
          .output_single_input_dimension(0, 1 - 4 * 5, 4, 0)
          .output_single_input_dimension(1, 2 - 5 * 5, 5, 0)
          .output_constant(2, 3)
          .output_single_input_dimension(3, 4 - 7 * 5, 7, 1)
          .output_single_input_dimension(4, 5 - 8 * 5, 8, 2)
          .Finalize()
          .value(),
      {{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}});
}
// Error cases for TranslateBy: offset-count mismatch, offsets that push an
// origin outside the finite index range (in both directions), and integer
// overflow when folding the translation into an output offset.
TEST(TranslateByTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>().Finalize().value(),
      AllDims().TranslateBy(span<const Index>({1, 2})),
      absl::StatusCode::kInvalidArgument,
      "Number of dimensions \\(1\\) does not match number of "
      "indices \\(2\\)");
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({kMinFiniteIndex})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         AllDims().TranslateBy(-kInfIndex),
                         absl::StatusCode::kInvalidArgument,
                         ".* is outside valid range .*");
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({kMinFiniteIndex})
                             .input_shape({10})
                             .Finalize()
                             .value(),
                         AllDims().TranslateBy(-1),
                         absl::StatusCode::kInvalidArgument,
                         ".* is outside valid range .*");
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .input_origin({kMaxFiniteIndex - 1})
                             .input_shape({2})
                             .Finalize()
                             .value(),
                         AllDims().TranslateBy(1),
                         absl::StatusCode::kInvalidArgument,
                         ".* is outside valid range .*");
  TestDimExpressionError(IndexTransformBuilder<1, 1>()
                             .output_single_input_dimension(
                                 0, std::numeric_limits<Index>::min(), 1, 0)
                             .Finalize()
                             .value(),
                         AllDims().TranslateBy(1),
                         absl::StatusCode::kInvalidArgument,
                         "Integer overflow computing output offset .*");
}
// Uniform translation of a dimension subset {0, 2}; untouched dimension 1
// keeps its identity map, and an infinite lower bound stays infinite while
// the finite extent grows by the offset.
TEST(TranslateByTest, DimSubsetUniform) {
  TestDimExpression(IndexTransformBuilder<3, 2>()
                        .input_origin({1, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2, 2, 2)
                        .Finalize()
                        .value(),
                    Dims(0, 2).TranslateBy(5),
                    {0, 2},
                    IndexTransformBuilder<3, 3>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 5})
                        .output_single_input_dimension(0, -5, 1, 0)
                        .output_single_input_dimension(1, 1)
                        .output_single_input_dimension(2, -5, 1, 2)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<3, 2>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 5})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2 - 2 * 5, 2, 2)
                        .Finalize()
                        .value(),
                    {{{4, 5, 6}, {4 + 5, 5, 6 + 5}}});
}
// Per-dimension offsets {5, 6} applied to a subset {0, 2}; each selected
// dimension gets its own offset while dimension 1 is untouched.
TEST(TranslateByTest, DimSubsetNonUniform) {
  TestDimExpression(IndexTransformBuilder<3, 2>()
                        .input_origin({1, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2, 2, 2)
                        .Finalize()
                        .value(),
                    Dims(0, 2).TranslateBy({5, 6}),
                    {0, 2},
                    IndexTransformBuilder<3, 3>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 6})
                        .output_single_input_dimension(0, -5, 1, 0)
                        .output_single_input_dimension(1, 1)
                        .output_single_input_dimension(2, -6, 1, 2)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<3, 2>()
                        .input_origin({6, 2, -kInfIndex})
                        .input_shape({4, 5, kInfIndex + 7 + 6})
                        .output_single_input_dimension(0, 1, 1, 1)
                        .output_single_input_dimension(1, 2 - 2 * 6, 2, 2)
                        .Finalize()
                        .value(),
                    {{{3, 4, 5}, {3 + 5, 4, 5 + 6}}});
}
// TranslateTo(8) on a domain with origin 5 is a translation by +3; the
// constant output map is unaffected.
TEST(TranslateToTest, OneDimensionalConstant) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_constant(0, 2)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(8),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({8})
                        .input_shape({10})
                        .output_single_input_dimension(0, -3, 1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({8})
                        .input_shape({10})
                        .output_constant(0, 2)
                        .Finalize()
                        .value(),
                    {{{7}, {10}}});
}
// TranslateTo(5) from origin 4 is a translation by +1; the single-input
// output offset absorbs -stride*1 (2 -> 2 - 3).
TEST(TranslateToTest, OneDimensionalSingleInputDimension) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(5),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_single_input_dimension(0, -1, 1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({5})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2 - 3, 3, 0)
                        .Finalize()
                        .value(),
                    {{{6}, {7}}});
}
// kImplicit as a TranslateTo target leaves the dimension's origin and
// output map completely unchanged.
TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) {
  TestDimExpression(IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo(kImplicit),
                    {0},
                    IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<1, 1>()
                        .input_origin({4})
                        .input_shape({10})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .Finalize()
                        .value(),
                    {{{6}, {6}}});
}
// Mixed targets: dimension 0 stays put (kImplicit) while dimension 1 is
// moved from origin 5 to 10, i.e. translated by +5 with output offset
// absorbing -stride*5 (4 -> 4 - 25).
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
  TestDimExpression(IndexTransformBuilder<2, 2>()
                        .input_origin({4, 5})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, 4, 5, 1)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo({kImplicit, 10}),
                    {0, 1},
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 0)
                        .output_single_input_dimension(1, -5, 1, 1)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, -25 + 4, 5, 1)
                        .Finalize()
                        .value(),
                    {{{6, 7}, {6, 12}}});
}
// Error cases for TranslateTo: an unbounded domain has no origin to
// translate, and a target that pushes the domain past the valid index
// range is rejected.
TEST(TranslateToTest, ErrorHandling) {
  TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
                         AllDims().TranslateTo(1),
                         absl::StatusCode::kInvalidArgument,
                         "Interval \\(-inf, \\+inf\\) is not bounded below");
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({-5})
          .input_shape({10})
          .Finalize()
          .value(),
      AllDims().TranslateTo(std::numeric_limits<Index>::max()),
      absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*");
}
#ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#include <stddef.h>
#include <atomic>
#include <string>
#include <string_view>
#include <utility>
#include "tsl/platform/macros.h"
#include "tsl/profiler/lib/nvtx_utils.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tsl/profiler/backends/cpu/annotation_stack.h"
#endif
namespace tsl::profiler {
// Pushes a profiler annotation produced by `generator`, a nullary callable.
// The generator is only invoked when a consumer is active, which keeps the
// disabled path essentially free.  The profiler (NVTX) domain, when present,
// takes precedence over the CPU annotation stack.
template <typename T>
void PushAnnotation(const T& generator) {
  if (auto domain = DefaultProfilerDomain();
      TF_PREDICT_FALSE(domain != nullptr)) {
    RangePush(domain, generator());
    return;
  }
#if !defined(IS_MOBILE_PLATFORM)
  if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
    AnnotationStack::PushAnnotation(static_cast<std::string_view>(generator()));
  }
#endif
}
// Convenience overload: wraps a C string in a generator lambda so the
// string is only consumed when a profiler consumer is active.
inline void PushAnnotation(const char* name) {
  PushAnnotation([name] { return name; });
}
// Convenience overload for std::string; the lambda returns a reference-bound
// copy of the name only when a profiler consumer is active.
inline void PushAnnotation(const std::string& name) {
  PushAnnotation([&name] { return name; });
}
// Pops the most recently pushed annotation from whichever consumer is
// active, mirroring the dispatch order of PushAnnotation.
inline void PopAnnotation() {
  // NOTE(review): the acquire fence presumably pairs with a release store on
  // the profiler-enable state so that enable/disable transitions are
  // observed before popping -- confirm against the backends.
  std::atomic_thread_fence(std::memory_order_acquire);
  if (auto domain = DefaultProfilerDomain();
      TF_PREDICT_FALSE(domain != nullptr)) {
    RangePop(domain);
    return;
  }
#if !defined(IS_MOBILE_PLATFORM)
  if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
    AnnotationStack::PopAnnotation();
  }
#endif
}
// RAII helper that pushes an annotation (string, C string, or generator
// callable) on construction and pops it on destruction.  Non-copyable so a
// push is always matched by exactly one pop.
class ScopedAnnotation {
 public:
  template <typename T>
  explicit ScopedAnnotation(T&& annotation) {
    PushAnnotation(std::forward<T>(annotation));
  }
  ~ScopedAnnotation() { PopAnnotation(); }
  // Returns whether the CPU annotation stack is currently collecting
  // annotations (always false on mobile builds).
  static bool IsEnabled() {
#if !defined(IS_MOBILE_PLATFORM)
    return AnnotationStack::IsEnabled();
#else
    return false;
#endif
  }
 private:
  ScopedAnnotation(const ScopedAnnotation&) = delete;
  ScopedAnnotation& operator=(const ScopedAnnotation&) = delete;
};
}
#endif | #include "tsl/profiler/lib/scoped_annotation.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/profiler/backends/cpu/annotation_stack.h"
namespace tsl {
namespace profiler {
namespace {
// Exercises the annotation stack end to end: disabled scopes record nothing,
// enabled scopes record, nested scopes join with "::", and manual
// Push/PopAnnotation pairs behave like scopes.
TEST(ScopedAnnotation, Simple) {
  {
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "");
  }
  {
    AnnotationStack::Enable(true);
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "blah");
    AnnotationStack::Enable(false);
  }
  {
    AnnotationStack::Enable(true);
    ScopedAnnotation outer("foo");
    ScopedAnnotation inner("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    AnnotationStack::Enable(false);
  }
  {
    AnnotationStack::Enable(true);
    PushAnnotation("foo");
    PushAnnotation("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    PopAnnotation();
    PopAnnotation();
    AnnotationStack::Enable(false);
  }
  EXPECT_EQ(AnnotationStack::Get(), "");
}
// Produces a deterministic filler string of `length` characters.  Despite
// the name, no randomness is involved: the benchmarks only care about the
// annotation's length, and determinism keeps runs comparable.
std::string GenerateRandomString(int length) {
  std::string filler;
  filler.assign(length, 'a');
  return filler;
}
// Measures the cost of a ScopedAnnotation when no consumer is enabled
// (the expected steady-state for production) across annotation sizes.
void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
  }
}
BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);
// Measures push/pop cost with the annotation stack enabled, across
// annotation sizes.
void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  AnnotationStack::Enable(true);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
  }
  AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);
// Measures the cost of a two-deep nested annotation (the inner scope
// deliberately shadows the outer `trace`) with the stack enabled.
void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  AnnotationStack::Enable(true);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
    { ScopedAnnotation trace(annotation); }
  }
  AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);
// Measures the cost of building the annotation string eagerly (StrCat per
// iteration) with the stack enabled -- the anti-pattern the lambda overload
// below avoids.
void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
  AnnotationStack::Enable(true);
  int i = 0;
  for (auto s : state) {
    ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
    ++i;
  }
  AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);
// With a generator lambda and the stack disabled, the StrCat should never
// run; this measures that near-zero disabled cost.
void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
  int i = 0;
  for (auto s : state) {
    ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
    ++i;
  }
}
BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);
// Lambda-generated annotation with the stack enabled: the generator runs
// each iteration, so this is comparable to the eager Adhoc benchmark.
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
    ::testing::benchmark::State& state) {
  AnnotationStack::Enable(true);
  int i = 0;
  for (auto s : state) {
    ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
    ++i;
  }
  AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);
}
}
} | inline void PopAnnotation() {
std::atomic_thread_fence(std::memory_order_acquire);
if (auto domain = DefaultProfilerDomain();
TF_PREDICT_FALSE(domain != nullptr)) {
RangePop(domain);
return;
}
#if !defined(IS_MOBILE_PLATFORM)
if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
AnnotationStack::PopAnnotation();
}
#endif
} | #include "tsl/profiler/lib/scoped_annotation.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/profiler/backends/cpu/annotation_stack.h"
namespace tsl {
namespace profiler {
namespace {
// End-to-end check of the annotation stack: disabled scopes record nothing,
// enabled scopes record, nesting joins with "::", and manual push/pop pairs
// mirror scoped behavior.
TEST(ScopedAnnotation, Simple) {
  {
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "");
  }
  {
    AnnotationStack::Enable(true);
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "blah");
    AnnotationStack::Enable(false);
  }
  {
    AnnotationStack::Enable(true);
    ScopedAnnotation outer("foo");
    ScopedAnnotation inner("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    AnnotationStack::Enable(false);
  }
  {
    AnnotationStack::Enable(true);
    PushAnnotation("foo");
    PushAnnotation("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    PopAnnotation();
    PopAnnotation();
    AnnotationStack::Enable(false);
  }
  EXPECT_EQ(AnnotationStack::Get(), "");
}
#include "xla/service/hlo_value_semantics_analysis.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Builds a rendezvous-name -> {send, recv} table for every host-transfer
// send/recv instruction in the module, so matching pairs can be looked up
// later by GetMatchingSendOrRecv.
SendRecvGroupMap::SendRecvGroupMap(const HloModule& hlo_module) {
  for (HloComputation* computation : hlo_module.computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() != HloOpcode::kSend &&
          instruction->opcode() != HloOpcode::kRecv) {
        continue;
      }
      // Matching send/recv instructions carry the same rendezvous name in
      // their frontend attributes.
      std::string rendezvous = instruction->frontend_attributes().map().at(
          kXlaHostTransferRendezvousNameAttr);
      // try_emplace creates an empty group on first sight of a rendezvous
      // name and is a no-op returning the existing entry afterwards; this
      // replaces the previous find-then-insert double lookup.
      auto [send_recv_iter, inserted] =
          host_transfer_rendezvous_map_.try_emplace(
              rendezvous, SendRecvGroup{nullptr, nullptr});
      if (instruction->opcode() == HloOpcode::kSend) {
        send_recv_iter->second.send = instruction;
      } else {
        send_recv_iter->second.recv = instruction;
      }
    }
  }
}
// Returns the partner of `send_or_recv` in its rendezvous group: the recv
// for a send, the send for a recv.  Errors if the instruction is neither a
// send nor a recv, or if no group was recorded for its rendezvous name.
absl::StatusOr<HloInstruction*> SendRecvGroupMap::GetMatchingSendOrRecv(
    HloInstruction* send_or_recv) const {
  if (send_or_recv->opcode() != HloOpcode::kSend &&
      send_or_recv->opcode() != HloOpcode::kRecv) {
    return InvalidArgument("Expecting only send or recv");
  }
  std::string rendezvous = send_or_recv->frontend_attributes().map().at(
      kXlaHostTransferRendezvousNameAttr);
  auto send_recv_iter = host_transfer_rendezvous_map_.find(rendezvous);
  if (send_recv_iter == host_transfer_rendezvous_map_.end()) {
    return Internal("Missing send or recv from send recv group.");
  }
  if (send_or_recv->opcode() == HloOpcode::kSend) {
    return send_recv_iter->second.recv;
  }
  return send_recv_iter->second.send;
}
bool HloPreOrderDFS::IsReady(const HloInstruction* instruction) const {
for (HloInstruction* user : instruction->users()) {
if (!visited_.contains(user)) {
return false;
}
}
return true;
}
namespace {
std::vector<HloInstruction*> GetAllInstructionsWithZeroUsers(
const HloComputation& computation) {
std::vector<HloInstruction*> results;
for (HloInstruction* instruction : computation.instructions()) {
if (instruction->users().empty()) {
results.push_back(instruction);
}
}
return results;
}
}
// Visits every instruction in `computation` users-before-operands: starts
// from instructions with no users and only pushes an operand once all of
// its users have been visited (IsReady).  Each visited instruction gets the
// visitor's Preprocess / Visit / Postprocess sequence.
absl::Status HloPreOrderDFS::Run(const HloComputation& computation,
                                 DfsHloVisitorBase<HloInstruction*>* visitor) {
  stack_.clear();
  visited_.clear();
  std::vector<HloInstruction*> roots =
      GetAllInstructionsWithZeroUsers(computation);
  for (HloInstruction* root : roots) {
    stack_.push_back(root);
  }
  while (!stack_.empty()) {
    HloInstruction* to_visit = stack_.back();
    stack_.pop_back();
    // An instruction can be pushed by several users; visit it only once.
    if (visited_.contains(to_visit)) {
      continue;
    }
    visited_.insert(to_visit);
    for (HloInstruction* operand : to_visit->mutable_operands()) {
      if (IsReady(operand)) {
        stack_.push_back(operand);
      }
    }
    TF_RETURN_IF_ERROR(visitor->Preprocess(to_visit));
    TF_RETURN_IF_ERROR(to_visit->Visit(visitor));
    TF_RETURN_IF_ERROR(visitor->Postprocess(to_visit));
  }
  return absl::OkStatus();
}
namespace {
// Generic stringification for ShapeTree elements; falls back to StrCat.
template <typename T>
std::string ToString(T element) {
  return absl::StrCat(element);
}
// Specialization: HloValueSemantics pointers print via their own ToString.
template <>
std::string ToString(const HloValueSemantics* element) {
  return element->ToString();
}
// Renders a ShapeTree as one "index, subshape: element" line per node.
template <typename T>
std::string ToString(const ShapeTree<T>& tree) {
  std::string str;
  tree.ForEachElement([&str, &tree](const ShapeIndex& shape_index, T element) {
    auto subshape = ShapeUtil::GetSubshape(tree.shape(), (shape_index));
    absl::StrAppend(&str, shape_index.ToString(), ", ", subshape.ToString(),
                    ": ", ToString(element), "\n");
  });
  return str;
}
}
// Seeds the depth analysis for one computation and runs the users-first
// traversal.  The computation root gets `root_depth` (or 0 when absent,
// i.e. for the entry computation); other user-less instructions get a fresh
// all -1 ("unreached") tree.
absl::Status EinsumDepthAnalysis::RunInternal(
    const HloComputation& computation,
    const std::optional<ShapeTree<int>>& root_depth) {
  std::vector<HloInstruction*> roots =
      GetAllInstructionsWithZeroUsers(computation);
  for (HloInstruction* root : roots) {
    if (root == computation.root_instruction()) {
      if (root_depth.has_value()) {
        TF_RETURN_IF_ERROR(SetInstructionDepth(root, *root_depth));
      } else {
        TF_RETURN_IF_ERROR(SetInstructionDepth(root, 0));
      }
    } else {
      GetOrCreateDepthTree(root);
    }
  }
  HloPreOrderDFS dfs;
  return dfs.Run(computation, this);
}
// Runs einsum-depth analysis over `computation` and returns the populated
// analysis object.
absl::StatusOr<std::unique_ptr<EinsumDepthAnalysis>> EinsumDepthAnalysis::Run(
    const HloComputation& computation,
    const SendRecvGroupMap& send_recv_group_map) {
  // absl::WrapUnique takes ownership of the raw allocation immediately,
  // removing the window where a raw owning pointer existed.  (make_unique is
  // presumably unavailable because the constructor is non-public -- confirm.)
  auto analysis =
      absl::WrapUnique(new EinsumDepthAnalysis(send_recv_group_map));
  TF_RETURN_IF_ERROR(analysis->RunInternal(computation, std::nullopt));
  return analysis;
}
namespace {
// Combines two depth values.  Non-negative (reached) depths win and take
// the maximum; two negative (unreached) depths combine to the more negative
// one; a lone negative new depth leaves a reached original untouched.
int MergeDepth(int original_depth, int new_depth) {
  if (new_depth >= 0) {
    return std::max(original_depth, new_depth);
  }
  return original_depth < 0 ? std::min(original_depth, new_depth)
                            : original_depth;
}
// Merges a uniform `depth` into every leaf of `depth_tree`.
void SetDepth(ShapeTree<int>& depth_tree, int depth) {
  depth_tree.ForEachMutableElement(
      [depth, &depth_tree](const ShapeIndex& shape_index, int* depth_ptr) {
        if (depth_tree.IsLeaf(shape_index)) {
          *depth_ptr = MergeDepth(*depth_ptr, depth);
        }
      });
}
// Merges `source` into `depth_tree` leaf-by-leaf at matching shape indices.
void SetDepth(ShapeTree<int>& depth_tree, const ShapeTree<int>& source) {
  depth_tree.ForEachMutableElement(
      [&depth_tree, &source](const ShapeIndex& shape_index, int* depth_ptr) {
        if (depth_tree.IsLeaf(shape_index)) {
          *depth_ptr = MergeDepth(*depth_ptr, source.element(shape_index));
        }
      });
}
// Returns the maximum depth in the tree if any element is non-negative
// (reached); otherwise returns the minimum (most negative) depth via a
// second pass.
int GetMaxDepth(const ShapeTree<int>& depth_tree) {
  int max_depth = -1;
  depth_tree.ForEachElement(
      [&max_depth](const ShapeIndex& shape_index, int depth) {
        max_depth = std::max(max_depth, depth);
        return absl::OkStatus();
      });
  if (max_depth >= 0) {
    return max_depth;
  }
  depth_tree.ForEachElement(
      [&max_depth](const ShapeIndex& shape_index, int depth) {
        max_depth = std::min(max_depth, depth);
        return absl::OkStatus();
      });
  return max_depth;
}
// Merges the `tuple_index` subtree of `tuple_depth_tree` into `depth_tree`:
// leaf {i0, i1, ...} of depth_tree gets element {tuple_index, i0, i1, ...}
// of the tuple tree.
void SetDepthFromTupleDepth(ShapeTree<int>& depth_tree,
                            const ShapeTree<int>& tuple_depth_tree,
                            int tuple_index) {
  depth_tree.ForEachMutableElement(
      [&depth_tree, &tuple_depth_tree, tuple_index](
          const ShapeIndex& shape_index, int* depth_ptr) {
        if (depth_tree.IsLeaf(shape_index)) {
          ShapeIndex output_index = shape_index;
          output_index.push_front(tuple_index);
          *depth_ptr =
              MergeDepth(*depth_ptr, tuple_depth_tree.element(output_index));
        }
      });
}
}
// Returns the depth tree for `instruction`, creating one with every element
// initialized to -1 (unreached) on first access.
ShapeTree<int>& EinsumDepthAnalysis::GetOrCreateDepthTree(
    const HloInstruction* instruction) {
  auto it = einsum_depth_map_.find(instruction);
  if (it == einsum_depth_map_.end()) {
    // Only construct the tree when the instruction is actually missing.
    it = einsum_depth_map_
             .emplace(instruction, ShapeTree<int>(instruction->shape(), -1))
             .first;
  }
  return it->second;
}
// Returns the existing depth tree for `instruction`; CHECK-fails if the
// traversal has not recorded one (a bug in visit ordering).
ShapeTree<int>& EinsumDepthAnalysis::GetDepthTreeOrDie(
    const HloInstruction* instruction) {
  auto depth_iter = einsum_depth_map_.find(instruction);
  CHECK(depth_iter != einsum_depth_map_.end())
      << "No depth tree found for instruction: " << instruction->ToString();
  return depth_iter->second;
}
// Merges a uniform `depth` into every leaf of the instruction's depth tree,
// creating the tree if needed.
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
    const HloInstruction* instruction, int depth) {
  SetDepth(GetOrCreateDepthTree(instruction), depth);
  return absl::OkStatus();
}
// Merges `depth` (a whole tree) into the instruction's depth tree, creating
// the tree if needed.
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
    const HloInstruction* instruction, const ShapeTree<int>& depth) {
  SetDepth(GetOrCreateDepthTree(instruction), depth);
  return absl::OkStatus();
}
// Merges the `tuple_index` subtree of `tuple_depth_tree` into the
// instruction's depth tree, creating the tree if needed.
absl::Status EinsumDepthAnalysis::SetInstructionDepthFromTupleDepth(
    const HloInstruction* instruction, const ShapeTree<int>& tuple_depth_tree,
    int tuple_index) {
  ShapeTree<int>& target = GetOrCreateDepthTree(instruction);
  SetDepthFromTupleDepth(target, tuple_depth_tree, tuple_index);
  return absl::OkStatus();
}
// Fallback handler: propagate the instruction's maximum output depth,
// unchanged, to every operand.
absl::Status EinsumDepthAnalysis::DefaultAction(HloInstruction* instruction) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction);
  const int max_depth = GetMaxDepth(depth_tree);
  for (const HloInstruction* operand : instruction->operands()) {
    TF_RETURN_IF_ERROR(SetInstructionDepth(operand, max_depth));
  }
  return absl::OkStatus();
}
// Tuples propagate per-element depths straight to their operands.
absl::Status EinsumDepthAnalysis::HandleTuple(HloInstruction* tuple) {
  return HandleTupleLike(tuple);
}
// Array-shaped all-reduce behaves like an ordinary op; a tuple-shaped
// (variadic) all-reduce propagates depths per tuple element.
absl::Status EinsumDepthAnalysis::HandleAllReduce(HloInstruction* all_reduce) {
  return all_reduce->shape().IsArray() ? DefaultAction(all_reduce)
                                       : HandleTupleLike(all_reduce);
}
// For tuple-forming instructions, operand i receives the depths of tuple
// element i of the output.
absl::Status EinsumDepthAnalysis::HandleTupleLike(HloInstruction* tuple_like) {
  const ShapeTree<int>& output_depth = GetDepthTreeOrDie(tuple_like);
  for (int i = 0; i < tuple_like->operand_count(); ++i) {
    ShapeTree<int>& operand_depth =
        GetOrCreateDepthTree(tuple_like->mutable_operand(i));
    SetDepthFromTupleDepth(operand_depth, output_depth, i);
  }
  return absl::OkStatus();
}
// GTE pushes its depths into the matching tuple slot of the operand: leaf
// {tuple_index, i0, ...} of the operand receives the GTE's depth at
// {i0, ...}; other slots of the operand are left untouched.
absl::Status EinsumDepthAnalysis::HandleGetTupleElement(
    HloInstruction* get_tuple_element) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(get_tuple_element);
  HloInstruction* operand = get_tuple_element->mutable_operand(0);
  int tuple_index = get_tuple_element->tuple_index();
  ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
  operand_depth.ForEachMutableElement(
      [&operand_depth, &depth_tree, tuple_index](const ShapeIndex& shape_index,
                                                 int* depth_ptr) {
        // Skip the tuple root and any slot other than the extracted one.
        if (shape_index.empty() || shape_index.front() != tuple_index) {
          return;
        }
        if (operand_depth.IsLeaf(shape_index)) {
          ShapeIndex output_index = shape_index;
          output_index.pop_front();
          *depth_ptr = MergeDepth(*depth_ptr, depth_tree.element(output_index));
        }
      });
  return absl::OkStatus();
}
// Einsum-like ops (dot, convolution) move operands one level further from
// the root: reached depths (>= 0) increment, unreached depths (< 0)
// decrement, preserving their sign convention.
absl::Status EinsumDepthAnalysis::HandleDepthIncrementInstruction(
    HloInstruction* instruction) {
  ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction);
  int instruction_depth = depth_tree.element({});
  for (HloInstruction* operand : instruction->mutable_operands()) {
    TF_RETURN_IF_ERROR(SetInstructionDepth(
        operand, instruction_depth >= 0 ? instruction_depth + 1
                                        : instruction_depth - 1));
  }
  return absl::OkStatus();
}
// Dot is an einsum: operands sit one depth level beyond the dot itself.
absl::Status EinsumDepthAnalysis::HandleDot(HloInstruction* dot) {
  return HandleDepthIncrementInstruction(dot);
}
// Convolution counts as an einsum for depth purposes, like dot.
absl::Status EinsumDepthAnalysis::HandleConvolution(
    HloInstruction* convolution) {
  return HandleDepthIncrementInstruction(convolution);
}
// A call forwards its output depths into the callee and then back out to
// the call operands via the callee's parameters.
absl::Status EinsumDepthAnalysis::HandleCall(HloInstruction* call) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(call);
  return HandleCalledComputation(*call->called_computations()[0], depth_tree,
                                 call->operands());
}
// Fusion is handled like a call into its fused computation.
absl::Status EinsumDepthAnalysis::HandleFusion(HloInstruction* fusion) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(fusion);
  return HandleCalledComputation(*fusion->called_computations()[0], depth_tree,
                                 fusion->operands());
}
// Propagates depth through a while loop.  The condition root is seeded with
// the loop's maximum depth; the body is then re-analyzed to a fixed point,
// since one body pass can surface depths for tuple elements that were
// previously unreached (loop-carried dependencies).
absl::Status EinsumDepthAnalysis::HandleWhile(HloInstruction* xla_while) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(xla_while);
  int max_depth = GetMaxDepth(depth_tree);
  HloComputation* condition_computation = xla_while->while_condition();
  HloInstruction* condition_root = condition_computation->root_instruction();
  ShapeTree<int> condition_depth(condition_root->shape(), max_depth);
  TF_RETURN_IF_ERROR(HandleCalledComputation(
      *condition_computation, condition_depth, xla_while->operands()));
  // First body pass uses the while's own depths; later passes use the
  // accumulated body-root tree.
  const ShapeTree<int>* root_depth_ptr = &depth_tree;
  HloComputation* body_computation = xla_while->while_body();
  bool run_depth_propagation_on_body = true;
  ShapeTree<int>& root_depth =
      GetOrCreateDepthTree(body_computation->root_instruction());
  while (run_depth_propagation_on_body) {
    run_depth_propagation_on_body = false;
    TF_RETURN_IF_ERROR(HandleCalledComputation(
        *body_computation, *root_depth_ptr, xla_while->operands()));
    HloInstruction* operand = body_computation->parameter_instruction(0);
    const ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
    // If the body parameter needs a leaf the root has not reached yet, copy
    // that depth to the root and run another propagation pass.
    root_depth.ForEachMutableElement(
        [&run_depth_propagation_on_body, &root_depth, &operand_depth](
            const ShapeIndex& shape_index, int* depth_ptr) {
          if (!root_depth.IsLeaf(shape_index)) {
            return;
          }
          if (root_depth.element(shape_index) < 0 &&
              operand_depth.element(shape_index) >= 0) {
            *depth_ptr = operand_depth.element(shape_index);
            run_depth_propagation_on_body = true;
          }
        });
    root_depth_ptr = &root_depth;
  }
  return absl::OkStatus();
}
// The predicate (operand 0) inherits the conditional's depth tree; each
// branch computation is analyzed with the conditional's depths and its
// single branch operand (operand i + 1).
absl::Status EinsumDepthAnalysis::HandleConditional(
    HloInstruction* conditional) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(conditional);
  TF_RETURN_IF_ERROR(
      SetInstructionDepth(conditional->operands()[0], depth_tree));
  for (int i = 0; i < conditional->branch_count(); ++i) {
    TF_RETURN_IF_ERROR(
        HandleCalledComputation(*conditional->called_computations()[i],
                                depth_tree, {conditional->operands()[i + 1]}));
  }
  return absl::OkStatus();
}
// Analyzes a callee with `root_depth` seeded at its root, then copies each
// parameter's resulting depths onto the corresponding caller operand.
absl::Status EinsumDepthAnalysis::HandleCalledComputation(
    const HloComputation& called_computation, const ShapeTree<int>& root_depth,
    absl::Span<HloInstruction* const> operands) {
  TF_RETURN_IF_ERROR(RunInternal(called_computation,
                                 std::optional<ShapeTree<int>>(root_depth)));
  for (int i = 0; i < operands.size(); ++i) {
    HloInstruction* operand = operands[i];
    HloInstruction* parameter = called_computation.parameter_instruction(i);
    const ShapeTree<int>& parameter_depth = GetOrCreateDepthTree(parameter);
    TF_RETURN_IF_ERROR(SetInstructionDepth(operand, parameter_depth));
  }
  return absl::OkStatus();
}
// after-all only carries tokens; every token operand inherits the
// instruction's maximum depth.
absl::Status EinsumDepthAnalysis::HandleAfterAll(HloInstruction* after_all) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(after_all);
  int max_depth = GetMaxDepth(depth_tree);
  for (HloInstruction* operand_token : after_all->mutable_operands()) {
    CHECK(operand_token->shape().IsToken());
    TF_RETURN_IF_ERROR(SetInstructionDepth(operand_token, max_depth));
  }
  return absl::OkStatus();
}
// The sent buffer (operand 0) gets the depths of send output tuple slot 0;
// the token operand gets the maximum depth.
absl::Status EinsumDepthAnalysis::HandleSend(HloInstruction* send) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send);
  HloInstruction* send_buffer = send->mutable_operand(0);
  ShapeTree<int>& send_buffer_depth = GetOrCreateDepthTree(send_buffer);
  SetDepthFromTupleDepth(send_buffer_depth, depth_tree, 0);
  int max_depth = GetMaxDepth(depth_tree);
  HloInstruction* token = send->mutable_operand(1);
  return SetInstructionDepth(token, max_depth);
}
// A recv's demand flows to its rendezvous-matched send: tuple slot 0 (the
// payload) copies the recv's per-leaf depths, every other leaf (tokens,
// context) gets the maximum depth.  The recv's own token operand also gets
// the maximum depth.
absl::Status EinsumDepthAnalysis::HandleRecv(HloInstruction* recv) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv);
  TF_ASSIGN_OR_RETURN(HloInstruction * send,
                      send_recv_group_map_->GetMatchingSendOrRecv(recv));
  CHECK(send) << "recv: " << recv->name()
              << " not found in send_recv_group_map: " << recv->ToString();
  ShapeTree<int>& send_depth = GetOrCreateDepthTree(send);
  int max_depth = GetMaxDepth(depth_tree);
  send_depth.ForEachMutableElement([&depth_tree, &send_depth, max_depth](
                                       const ShapeIndex& index, int* depth) {
    if (!send_depth.IsLeaf(index)) {
      return;
    }
    if (index.front() == 0) {
      *depth = MergeDepth(*depth, depth_tree.element(index));
      return;
    }
    *depth = MergeDepth(*depth, max_depth);
  });
  HloInstruction* after_all = recv->mutable_operand(0);
  return SetInstructionDepth(after_all, max_depth);
}
// The wrapped send inherits the maximum depth demanded of send-done.
absl::Status EinsumDepthAnalysis::HandleSendDone(HloInstruction* send_done) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send_done);
  return SetInstructionDepth(send_done->mutable_operand(0),
                             GetMaxDepth(depth_tree));
}
// recv-done's payload demand (tuple slot 0) is copied per-leaf onto the
// recv; the recv's remaining leaves (token, context) get the maximum depth.
absl::Status EinsumDepthAnalysis::HandleRecvDone(HloInstruction* recv_done) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv_done);
  int max_depth = GetMaxDepth(depth_tree);
  HloInstruction* recv = recv_done->mutable_operand(0);
  ShapeTree<int>& recv_depth = GetOrCreateDepthTree(recv);
  recv_depth.ForEachMutableElement([&depth_tree, &recv_depth, max_depth](
                                       const ShapeIndex& index, int* depth) {
    if (!recv_depth.IsLeaf(index)) {
      return;
    }
    if (index.front() == 0) {
      *depth = MergeDepth(*depth, depth_tree.element(index));
      return;
    }
    *depth = MergeDepth(*depth, max_depth);
  });
  return absl::OkStatus();
}
// async-start's output tuple slot 1 holds the wrapped computation's result;
// that subtree seeds the wrapped computation, whose parameter depths then
// flow back to the async-start operands.
absl::Status EinsumDepthAnalysis::HandleAsyncStart(
    HloInstruction* async_start) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_start);
  TF_ASSIGN_OR_RETURN(ShapeTree<int> output_depth_tree,
                      depth_tree.SubShapeTree({1}));
  return HandleCalledComputation(*(async_start->async_wrapped_computation()),
                                 output_depth_tree, async_start->operands());
}
// async-done's depths flow into slot 1 (the result slot) of the paired
// async-start's depth tree; other slots are left untouched.
absl::Status EinsumDepthAnalysis::HandleAsyncDone(HloInstruction* async_done) {
  const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_done);
  HloInstruction* async_start = async_done->mutable_operand(0);
  ShapeTree<int>& async_start_depth = GetOrCreateDepthTree(async_start);
  async_start_depth.ForEachMutableElement(
      [&depth_tree, &async_start_depth](const ShapeIndex& index, int* depth) {
        if (!async_start_depth.IsLeaf(index)) {
          return;
        }
        if (index.front() == 1) {
          ShapeIndex output_index = index;
          output_index.pop_front();
          *depth = MergeDepth(*depth, depth_tree.element(output_index));
        }
      });
  return absl::OkStatus();
}
namespace {
// Heights merge as a running maximum: a value's height is the length of the
// longest chain of height-incrementing ops (e.g. dot/convolution) below it.
int MergeHeight(int original_height, int new_height) {
  return std::max(original_height, new_height);
}

// Merges the scalar `height` into every leaf of `height_tree`.
void SetHeight(ShapeTree<int>& height_tree, int height) {
  height_tree.ForEachMutableElement(
      [height, &height_tree](const ShapeIndex& shape_index, int* height_ptr) {
        if (height_tree.IsLeaf(shape_index)) {
          *height_ptr = MergeHeight(*height_ptr, height);
        }
      });
}

// Merges the subtree of `source` rooted at `source_index` into the subtree
// of `height_tree` rooted at `target_index`: element {target_index, suffix}
// receives source element {source_index, suffix}. With the defaulted empty
// indices the whole of `source` is merged onto the whole of `height_tree`.
void SetHeight(ShapeTree<int>& height_tree, const ShapeTree<int>& source,
               const ShapeIndex& source_index = {},
               const ShapeIndex& target_index = {}) {
  height_tree.ForEachMutableElement(
      [&source, &source_index, &target_index](const ShapeIndex& shape_index,
                                              int* height_ptr) {
        // Skip elements that are not inside the target subtree.
        if (shape_index.size() < target_index.size()) {
          return;
        }
        for (int i = 0; i < target_index.size(); ++i) {
          if (shape_index[i] != target_index[i]) {
            return;
          }
        }
        // Rebase the target-relative suffix onto the source prefix.
        ShapeIndex complete_source_index = source_index;
        for (int i = target_index.size(); i < shape_index.size(); ++i) {
          complete_source_index.push_back(shape_index[i]);
        }
        *height_ptr =
            MergeHeight(*height_ptr, source.element(complete_source_index));
      });
}

// Returns the largest height stored anywhere in `height_tree` (0 for an
// all-default tree). The ForEachElement visitor returns void, so the
// spurious `return absl::OkStatus();` the callback used to carry is gone.
int GetMaxHeight(const ShapeTree<int>& height_tree) {
  int max_height = 0;
  height_tree.ForEachElement(
      [&max_height](const ShapeIndex& shape_index, int height) {
        max_height = std::max(max_height, height);
      });
  return max_height;
}

// Returns the maximum height over all operands of `instruction`.
// CHECK-fails if any operand has no recorded height yet.
int GetMaxOperandHeight(HloInstruction* instruction,
                        const EinsumHeightMap& einsum_height_map) {
  int max_height = 0;
  for (HloInstruction* operand : instruction->mutable_operands()) {
    auto operand_height_iter = einsum_height_map.find(operand);
    CHECK(operand_height_iter != einsum_height_map.end())
        << "operand: " << operand->name();
    const ShapeTree<int>& operand_height_tree = operand_height_iter->second;
    max_height = std::max(max_height, GetMaxHeight(operand_height_tree));
  }
  return max_height;
}
}  // namespace
// Runs the height analysis over `computation` and returns the populated
// analysis object.
absl::StatusOr<std::unique_ptr<EinsumHeightAnalysis>> EinsumHeightAnalysis::Run(
    const HloComputation& computation,
    const SendRecvGroupMap& send_recv_group_map) {
  // Raw new + unique_ptr adoption instead of std::make_unique — presumably
  // because the constructor is not publicly accessible; verify in the header.
  EinsumHeightAnalysis* analysis_ptr =
      new EinsumHeightAnalysis(send_recv_group_map);
  std::unique_ptr<EinsumHeightAnalysis> analysis(analysis_ptr);
  // NOTE(review): RunInternal is invoked twice on purpose or by accident?
  // Handlers are memoized via RETURN_IF_HEIGHT_EXISTS, which would make the
  // second pass a no-op unless heights can become known late (e.g. through
  // send/recv pairing). Confirm before simplifying to a single pass.
  TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {}));
  TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {}));
  return analysis;
}
// Visits `computation`, seeding its parameters from `operands` (pass an
// empty span for the entry computation).
absl::Status EinsumHeightAnalysis::RunInternal(
    const HloComputation& computation,
    absl::Span<HloInstruction* const> operands) {
  return HandleCalledComputation(computation, operands);
}
// Returns the height tree recorded for `instruction`, creating a
// zero-initialized tree shaped like the instruction's result on first use.
ShapeTree<int>& EinsumHeightAnalysis::GetOrCreateHeightTree(
    const HloInstruction* instruction) {
  // try_emplace performs a single hash lookup and constructs the ShapeTree
  // in place only on a miss; the original find-then-insert looked the key up
  // twice on the miss path and built the tree before moving it in.
  return einsum_height_map_.try_emplace(instruction, instruction->shape(), 0)
      .first->second;
}
// Looks up the already-computed height tree for `instruction`.
// The instruction must have been visited before; otherwise this CHECK-fails.
ShapeTree<int>& EinsumHeightAnalysis::GetHeightTreeOrDie(
    const HloInstruction* instruction) {
  const auto entry = einsum_height_map_.find(instruction);
  CHECK(entry != einsum_height_map_.end());
  return entry->second;
}
// True iff a height tree has already been recorded for `instruction`.
bool EinsumHeightAnalysis::HasHeightFor(
    const HloInstruction* instruction) const {
  return einsum_height_map_.find(instruction) != einsum_height_map_.end();
}
// Merges the scalar `height` into every leaf of the instruction's height
// tree, creating the tree on demand.
absl::Status EinsumHeightAnalysis::SetInstructionHeight(
    const HloInstruction* instruction, int height) {
  SetHeight(GetOrCreateHeightTree(instruction), height);
  return absl::OkStatus();
}
// Merges an entire height tree into the instruction's height tree, creating
// the destination tree on demand.
absl::Status EinsumHeightAnalysis::SetInstructionHeight(
    const HloInstruction* instruction, const ShapeTree<int>& height) {
  SetHeight(GetOrCreateHeightTree(instruction), height);
  return absl::OkStatus();
}
// Handlers can be re-entered when an instruction is reachable from several
// roots (or across the two RunInternal passes); bail out early once this
// instruction's height is already known.
#define RETURN_IF_HEIGHT_EXISTS(instruction) \
  if (HasHeightFor(instruction)) {           \
    return absl::OkStatus();                 \
  }
// Shared handler for ops that increment the einsum height (dot/convolution):
// the result's height is 1 + the tallest operand's root height.
absl::Status EinsumHeightAnalysis::HandleHeightIncrementInstruction(
    HloInstruction* instruction) {
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction);
  for (HloInstruction* operand : instruction->mutable_operands()) {
    const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
    // element({}) is the operand's root height; SetHeight merges with max,
    // so iterating all operands yields 1 + max over operands.
    SetHeight(height_tree, operand_height_tree.element({}) + 1);
  }
  return absl::OkStatus();
}
// Visits `computation`: first seeds its parameters with the height trees of
// the caller-supplied `operands` (skipped when `operands` is empty, i.e. for
// the entry computation), then runs the visitor from every root.
absl::Status EinsumHeightAnalysis::HandleCalledComputation(
    const HloComputation& computation,
    absl::Span<HloInstruction* const> operands) {
  if (!operands.empty()) {
    if (computation.num_parameters() != operands.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          operands.size(), " operands were passed for the computation ",
          computation.name(), " with ", computation.num_parameters(),
          " parameters."));
    }
    // Each parameter inherits the height tree of its matching operand.
    for (int parameter_index = 0;
         parameter_index < computation.num_parameters(); ++parameter_index) {
      HloInstruction* parameter =
          computation.parameter_instruction(parameter_index);
      HloInstruction* operand = operands[parameter_index];
      const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
      TF_RETURN_IF_ERROR(SetInstructionHeight(parameter, operand_height_tree));
    }
  }
  // Instructions without users are the computation's roots; Accept
  // presumably walks operands before users (post-order), so starting from
  // every root covers all reachable instructions — confirm Accept semantics.
  for (HloInstruction* instruction : computation.instructions()) {
    if (instruction->user_count() == 0) {
      TF_RETURN_IF_ERROR(instruction->Accept(this));
    }
  }
  return absl::OkStatus();
}
// Fallback for ops with no dedicated handler: the result simply takes the
// maximum height found among its operands (no increment).
absl::Status EinsumHeightAnalysis::DefaultAction(HloInstruction* instruction) {
  RETURN_IF_HEIGHT_EXISTS(instruction);
  return SetInstructionHeight(
      instruction, GetMaxOperandHeight(instruction, einsum_height_map_));
}
// Shared handler for tuple-forming ops: each leaf {i, rest...} of the result
// inherits the height of leaf {rest...} of operand i.
absl::Status EinsumHeightAnalysis::HandleTupleLike(HloInstruction* tuple_like) {
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(tuple_like);
  height_tree.ForEachMutableElement([&height_tree, tuple_like, this](
                                        const ShapeIndex& index, int* height) {
    if (!height_tree.IsLeaf(index)) {
      return;
    }
    // The leading index component selects the operand; the remainder
    // addresses the matching element inside that operand.
    int operand_index = index.front();
    const HloInstruction* operand = tuple_like->operand(operand_index);
    const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
    ShapeIndex source_index = index;
    source_index.pop_front();
    *height = MergeHeight(*height, operand_height_tree.element(source_index));
  });
  return absl::OkStatus();
}
// A tuple's leaves take their heights directly from the matching operand
// leaves.
absl::Status EinsumHeightAnalysis::HandleTuple(HloInstruction* tuple) {
  RETURN_IF_HEIGHT_EXISTS(tuple);
  return HandleTupleLike(tuple);
}
// A get-tuple-element's height tree is the {tuple_index} subtree of its
// operand's height tree, merged onto the result's root.
absl::Status EinsumHeightAnalysis::HandleGetTupleElement(
    HloInstruction* get_tuple_element) {
  RETURN_IF_HEIGHT_EXISTS(get_tuple_element);
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(get_tuple_element);
  const ShapeTree<int>& tuple_height_tree =
      GetHeightTreeOrDie(get_tuple_element->operand(0));
  int tuple_index = get_tuple_element->tuple_index();
  // Source subtree {tuple_index} maps onto the whole result tree ({}).
  SetHeight(height_tree, tuple_height_tree, {tuple_index}, {});
  return absl::OkStatus();
}
// Dot is an einsum op: its height is one more than its tallest operand.
absl::Status EinsumHeightAnalysis::HandleDot(HloInstruction* dot) {
  RETURN_IF_HEIGHT_EXISTS(dot);
  return HandleHeightIncrementInstruction(dot);
}
// Convolution counts as an einsum op too: height is one more than its
// tallest operand.
absl::Status EinsumHeightAnalysis::HandleConvolution(
    HloInstruction* convolution) {
  RETURN_IF_HEIGHT_EXISTS(convolution);
  return HandleHeightIncrementInstruction(convolution);
}
// A call takes the height of its called computation's root, after the
// caller's operand heights have been bound to the computation's parameters.
absl::Status EinsumHeightAnalysis::HandleCall(HloInstruction* call) {
  RETURN_IF_HEIGHT_EXISTS(call);
  // Calls (and fusions routed here) have exactly one called computation;
  // hoist it so the vector is fetched once instead of twice.
  HloComputation* computation = call->called_computations()[0];
  TF_RETURN_IF_ERROR(
      HandleCalledComputation(*computation, call->mutable_operands()));
  const ShapeTree<int>& root_height_tree =
      GetHeightTreeOrDie(computation->root_instruction());
  TF_RETURN_IF_ERROR(SetInstructionHeight(call, root_height_tree));
  return absl::OkStatus();
}
// A fusion is treated exactly like a call to its fused computation.
absl::Status EinsumHeightAnalysis::HandleFusion(HloInstruction* fusion) {
  RETURN_IF_HEIGHT_EXISTS(fusion);
  return HandleCall(fusion);
}
// Visits both the condition and the body with the while's operands bound to
// their parameters; the while itself takes the height of the body's root.
// NOTE(review): the body is visited once, so heights do not iterate to a
// fixed point across loop-carried dependencies — confirm this is intended.
absl::Status EinsumHeightAnalysis::HandleWhile(HloInstruction* xla_while) {
  RETURN_IF_HEIGHT_EXISTS(xla_while);
  TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_condition()),
                                             xla_while->mutable_operands()));
  TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_body()),
                                             xla_while->mutable_operands()));
  const ShapeTree<int>& root_height_tree =
      GetHeightTreeOrDie(xla_while->while_body()->root_instruction());
  return SetInstructionHeight(xla_while, root_height_tree);
}
// A conditional's height is the element-wise max over all branch roots,
// after binding each branch's single operand to its parameter.
absl::Status EinsumHeightAnalysis::HandleConditional(
    HloInstruction* conditional) {
  RETURN_IF_HEIGHT_EXISTS(conditional);
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(conditional);
  // branch_count() is int; use an int index to avoid a signed/unsigned
  // comparison. Operand 0 is the branch selector; branch i consumes
  // operand i + 1.
  for (int i = 0; i < conditional->branch_count(); ++i) {
    HloComputation* computation = conditional->branch_computation(i);
    TF_RETURN_IF_ERROR(HandleCalledComputation(
        *computation, {conditional->mutable_operands()[i + 1]}));
    // Read-only: bind as const ref, consistent with the other handlers.
    const ShapeTree<int>& branch_root_height_tree =
        GetHeightTreeOrDie(computation->root_instruction());
    SetHeight(height_tree, branch_root_height_tree);
  }
  return absl::OkStatus();
}
// The sent buffer's height tree is merged into the subtree of the send's
// result rooted at {0} (source index {} -> target index {0}); the send's
// non-data elements keep their defaults.
absl::Status EinsumHeightAnalysis::HandleSend(HloInstruction* send) {
  RETURN_IF_HEIGHT_EXISTS(send);
  HloInstruction* send_buffer = send->mutable_operand(0);
  const ShapeTree<int>& send_buffer_height_tree =
      GetHeightTreeOrDie(send_buffer);
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(send);
  SetHeight(height_tree, send_buffer_height_tree, {}, {0});
  return absl::OkStatus();
}
// A recv has no data operand; its buffer height comes from the matching
// send's operand. The send is visited first in case it has not been
// processed yet — RETURN_IF_HEIGHT_EXISTS makes that visit idempotent.
absl::Status EinsumHeightAnalysis::HandleRecv(HloInstruction* recv) {
  RETURN_IF_HEIGHT_EXISTS(recv);
  TF_ASSIGN_OR_RETURN(HloInstruction * send,
                      send_recv_group_map_->GetMatchingSendOrRecv(recv));
  TF_RETURN_IF_ERROR(send->Accept(this));
  HloInstruction* send_buffer = send->mutable_operand(0);
  const ShapeTree<int>& send_buffer_height_tree =
      GetHeightTreeOrDie(send_buffer);
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(recv);
  // The sent buffer's heights land under the recv's data element {0}.
  SetHeight(height_tree, send_buffer_height_tree, {}, {0});
  return absl::OkStatus();
}
// A send-done contributes no einsum height of its own; it just gets a
// zero-initialized height tree so later lookups succeed.
absl::Status EinsumHeightAnalysis::HandleSendDone(HloInstruction* send_done) {
  RETURN_IF_HEIGHT_EXISTS(send_done);
  GetOrCreateHeightTree(send_done);
  return absl::OkStatus();
}
// The recv-done's data element ({0}) inherits the height of the recv's data
// element ({0}); all other elements keep their defaults.
absl::Status EinsumHeightAnalysis::HandleRecvDone(HloInstruction* recv_done) {
  RETURN_IF_HEIGHT_EXISTS(recv_done);
  HloInstruction* recv = recv_done->mutable_operand(0);
  const ShapeTree<int>& recv_height_tree = GetHeightTreeOrDie(recv);
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(recv_done);
  SetHeight(height_tree, recv_height_tree, {0}, {0});
  return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleAllReduce(HloInstruction* all_reduce) {
RETURN_IF | #include "xla/service/hlo_value_semantics_analysis.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// A complete MNIST MLP training step in HLO: a 3-layer (784->128->32->10)
// ReLU network fed by infeed, with softmax-cross-entropy backprop and SGD
// updates (lr = 0.01), all inside a while loop driven by a pred carried in
// the loop state. NOTE(review): not referenced by the tests visible in this
// file — presumably used by tests defined further down.
const char kMnistHlo[] = R"(
HloModule MnistTrainingLoopWithInfeed.140, entry_computation_layout={(f32[784,128]{1,0:T(8,128)},f32[128]{0:T(256)},f32[128,32]{1,0:T(8,128)},f32[32]{0:T(256)},f32[32,10]{1,0:T(8,128)},f32[10]{0:T(256)})->(f32[784,128]{1,0:T(8,128)}, f32[128]{0:T(256)}, f32[128,32]{1,0:T(8,128)}, f32[32]{0:T(256)}, f32[32,10]{1,0:T(8,128)}, f32[10]{0:T(256)})}
relu.9 {
  x.10 = f32[] parameter(0)
  constant.11 = f32[] constant(0)
  ROOT maximum.12 = f32[] maximum(x.10, constant.11)
}
max_F32.17 {
  lhs.18 = f32[] parameter(0)
  rhs.19 = f32[] parameter(1)
  ROOT maximum.20 = f32[] maximum(lhs.18, rhs.19)
}
add_F32.1 {
  lhs.22 = f32[] parameter(0)
  rhs.23 = f32[] parameter(1)
  ROOT add.24 = f32[] add(lhs.22, rhs.23)
}
relu_gradients.29 {
  activation.30 = f32[] parameter(0)
  constant.32 = f32[] constant(0)
  compare.33 = pred[] compare(activation.30, constant.32), direction=GT
  backprop.31 = f32[] parameter(1)
  ROOT select.34 = f32[] select(compare.33, backprop.31, constant.32)
}
body.49 {
  after-all.51 = token[] after-all()
  infeed.52 = ((f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]), token[]) infeed(after-all.51)
  get.53 = (f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]) get-tuple-element(infeed.52), index=0
  get.54 = f32[100,784]{1,0} get-tuple-element(get.53), index=0
  prev.50 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  get.57 = f32[784,128]{1,0} get-tuple-element(prev.50), index=0
  dot.63 = f32[100,128]{1,0} dot(get.54, get.57), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.58 = f32[128]{0} get-tuple-element(prev.50), index=1
  broadcast.64 = f32[100,128]{1,0} broadcast(get.58), dimensions={1}
  add.65 = f32[100,128]{1,0} add(dot.63, broadcast.64)
  map.66 = f32[100,128]{1,0} map(add.65), dimensions={0,1}, to_apply=relu.9
  get.59 = f32[128,32]{1,0} get-tuple-element(prev.50), index=2
  dot.67 = f32[100,32]{1,0} dot(map.66, get.59), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.60 = f32[32]{0} get-tuple-element(prev.50), index=3
  broadcast.68 = f32[100,32]{1,0} broadcast(get.60), dimensions={1}
  add.69 = f32[100,32]{1,0} add(dot.67, broadcast.68)
  map.70 = f32[100,32]{1,0} map(add.69), dimensions={0,1}, to_apply=relu.9
  get.61 = f32[32,10]{1,0} get-tuple-element(prev.50), index=4
  dot.71 = f32[100,10]{1,0} dot(map.70, get.61), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.62 = f32[10]{0} get-tuple-element(prev.50), index=5
  broadcast.72 = f32[100,10]{1,0} broadcast(get.62), dimensions={1}
  add.73 = f32[100,10]{1,0} add(dot.71, broadcast.72)
  constant.74 = f32[] constant(-inf)
  reduce.75 = f32[100]{0} reduce(add.73, constant.74), dimensions={1}, to_apply=max_F32.17
  broadcast.76 = f32[100,10]{1,0} broadcast(reduce.75), dimensions={0}
  subtract.77 = f32[100,10]{1,0} subtract(add.73, broadcast.76)
  exponential.78 = f32[100,10]{1,0} exponential(subtract.77)
  constant.79 = f32[] constant(0)
  reduce.80 = f32[100]{0} reduce(exponential.78, constant.79), dimensions={1}, to_apply=add_F32.1
  broadcast.81 = f32[100,10]{1,0} broadcast(reduce.80), dimensions={0}
  divide.82 = f32[100,10]{1,0} divide(exponential.78, broadcast.81)
  get.55 = f32[100,10]{1,0} get-tuple-element(get.53), index=1
  subtract.83 = f32[100,10]{1,0} subtract(divide.82, get.55)
  transpose.88 = f32[10,32]{0,1} transpose(get.61), dimensions={1,0}
  dot.89 = f32[100,32]{1,0} dot(subtract.83, transpose.88), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.90 = f32[100,32]{1,0} map(map.70, dot.89), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.95 = f32[32,128]{0,1} transpose(get.59), dimensions={1,0}
  dot.96 = f32[100,128]{1,0} dot(map.90, transpose.95), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.97 = f32[100,128]{1,0} map(map.66, dot.96), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.98 = f32[784,100]{0,1} transpose(get.54), dimensions={1,0}
  dot.99 = f32[784,128]{1,0} dot(transpose.98, map.97), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  constant.104 = f32[] constant(0.01)
  broadcast.105 = f32[784,128]{1,0} broadcast(constant.104), dimensions={}
  multiply.106 = f32[784,128]{1,0} multiply(dot.99, broadcast.105)
  subtract.107 = f32[784,128]{1,0} subtract(get.57, multiply.106)
  reduce.101 = f32[128]{0} reduce(map.97, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.109 = f32[128]{0} broadcast(constant.104), dimensions={}
  multiply.110 = f32[128]{0} multiply(reduce.101, broadcast.109)
  subtract.111 = f32[128]{0} subtract(get.58, multiply.110)
  transpose.91 = f32[128,100]{0,1} transpose(map.66), dimensions={1,0}
  dot.92 = f32[128,32]{1,0} dot(transpose.91, map.90), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.113 = f32[128,32]{1,0} broadcast(constant.104), dimensions={}
  multiply.114 = f32[128,32]{1,0} multiply(dot.92, broadcast.113)
  subtract.115 = f32[128,32]{1,0} subtract(get.59, multiply.114)
  reduce.94 = f32[32]{0} reduce(map.90, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.117 = f32[32]{0} broadcast(constant.104), dimensions={}
  multiply.118 = f32[32]{0} multiply(reduce.94, broadcast.117)
  subtract.119 = f32[32]{0} subtract(get.60, multiply.118)
  transpose.84 = f32[32,100]{0,1} transpose(map.70), dimensions={1,0}
  dot.85 = f32[32,10]{1,0} dot(transpose.84, subtract.83), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.121 = f32[32,10]{1,0} broadcast(constant.104), dimensions={}
  multiply.122 = f32[32,10]{1,0} multiply(dot.85, broadcast.121)
  subtract.123 = f32[32,10]{1,0} subtract(get.61, multiply.122)
  reduce.87 = f32[10]{0} reduce(subtract.83, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.125 = f32[10]{0} broadcast(constant.104), dimensions={}
  multiply.126 = f32[10]{0} multiply(reduce.87, broadcast.125)
  subtract.127 = f32[10]{0} subtract(get.62, multiply.126)
  get.56 = pred[] get-tuple-element(get.53), index=2
  ROOT tuple.128 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(subtract.107, subtract.111, subtract.115, subtract.119, subtract.123, subtract.127, get.56)
}
condition.129 {
  prev.130 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  ROOT get.131 = pred[] get-tuple-element(prev.130), index=6
}
ENTRY MnistTrainingLoopWithInfeed.140 {
  layer1_weights.1 = f32[784,128]{1,0} parameter(0)
  layer1_biases.2 = f32[128]{0} parameter(1)
  layer2_weights.3 = f32[128,32]{1,0} parameter(2)
  layer2_biases.4 = f32[32]{0} parameter(3)
  layer3_weights.5 = f32[32,10]{1,0} parameter(4)
  layer3_biases.6 = f32[10]{0} parameter(5)
  constant.7 = pred[] constant(true)
  tuple.8 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(layer1_weights.1, layer1_biases.2, layer2_weights.3, layer2_biases.4, layer3_weights.5, layer3_biases.6, constant.7)
  while.132 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) while(tuple.8), condition=condition.129, body=body.49
  get.133 = f32[784,128]{1,0} get-tuple-element(while.132), index=0
  get.134 = f32[128]{0} get-tuple-element(while.132), index=1
  get.135 = f32[128,32]{1,0} get-tuple-element(while.132), index=2
  get.136 = f32[32]{0} get-tuple-element(while.132), index=3
  get.137 = f32[32,10]{1,0} get-tuple-element(while.132), index=4
  get.138 = f32[10]{0} get-tuple-element(while.132), index=5
  ROOT tuple.139 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}) tuple(get.133, get.134, get.135, get.136, get.137, get.138)
}
)";
// Test fixture exposing small predicates that check which
// HloValueSemanticLabel the analysis assigned to a named instruction.
class HloValueSemanticsAnalysisTest : public HloTestBase {
 public:
  // True iff `instruction_name`'s semantics carry `expected_label`.
  // Logs the instruction and its semantics to ease failure triage.
  bool HasLabel(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name,
                const HloValueSemanticLabel& expected_label) {
    HloInstruction* instruction = FindInstruction(module, instruction_name);
    const HloValueSemantics* semantics =
        hlo_value_semantics_analysis.GetSemantics(instruction);
    LOG(INFO) << "instruction: " << instruction->ToString()
              << semantics->ToString();
    return semantics->label() == expected_label;
  }
  // Convenience wrappers over HasLabel, one per semantic label.
  bool IsStatic(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kStatic);
  }
  bool IsWeight(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kWeight);
  }
  bool IsActivation(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kActivation);
  }
  bool IsActivationGradient(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kActivationGradient);
  }
  bool IsWeightGradient(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kWeightGradient);
  }
  bool IsTupleOrToken(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kTupleOrToken);
  }
};
// Single matmul forward/backward: parameters and their copies stay kWeight,
// the forward dot yields kActivation, the ReLU-derivative mask (built only
// from constants and a compare) is kStatic, and the gradient dot that
// produces the weight update is labeled kWeight.
TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) {
  const std::string module_str = R"(
HloModule OneMatmul
region_0.39 {
  Arg_0.40 = f32[] parameter(0)
  Arg_1.41 = f32[] parameter(1)
  ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41)
}
ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1}
  maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1}
  compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1}
  select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
  dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.11 = f32[] constant(-0.01), sharding={replicated}
  broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1}
  add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1}
  reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated}
  ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
}
// Conditional with two branches (one of them using async abs-start/abs-done
// on a parallel thread): the tuple-shaped conditional result is labeled
// kTupleOrToken.
TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) {
  const std::string module_str = R"(
HloModule Module
branch0 {
  tparam = f32[4] parameter(0)
  tgte1 = f32[4] ceil(tparam)
  ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1)
}
branch1 {
  fparam = f32[4] parameter(0)
  %async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
  %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
  ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done)
}
ENTRY entry {
  p0 = f32[4] parameter(0)
  b0 = s32[] parameter(1)
  ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0),
    branch_computations={branch0, branch1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(),
                             "conditional"));
}
// Two stacked matmuls with backprop: forward dots are kActivation, the ReLU
// mask is kStatic, the activation-gradient dot (dot.2) is kWeight, and the
// two weight-gradient dots (dot.5, dot.6) are classified kActivation here.
TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) {
  const std::string module_str = R"(
HloModule TwoMatmuls
region_0.44 {
  Arg_0.45 = f32[] parameter(0)
  Arg_1.46 = f32[] parameter(1)
  ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46)
}
ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1}
  dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1}
  maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1}
  compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1}
  select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1}
  dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
  dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.12 = f32[] constant(-0.01), sharding={replicated}
  broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1}
  add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1}
  dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1}
  multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1}
  add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1}
  reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated}
  ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  // Inputs keep their weight labeling (checked via negations).
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6"));
}
TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) {
const std::string module_str = R"(
HloModule RepeatWhile
region_0.52 {
arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated}
constant.61 = s32[] constant(1), sharding={replicated}
add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated}
get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1}
constant.69 = s32[] constant(0), sharding={replicated}
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated}
constant.68 = s32[] constant(3), sharding={replicated}
add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated}
select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated}
dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1}
dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1}
dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.43 = f32[] constant(0), sharding={replicated}
broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1}
maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1}
get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1}
reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1}
compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), direction=EQ, sharding={devices=[2,1]0,1}
constant.42 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1}
select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1}
reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
}
region_1.107 {
arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated}
constant.116 = s32[] constant(3)
ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT
}
region_2.126 {
Arg_0.127 = f32[] parameter(0)
Arg_1.128 = f32[] parameter(1)
ROOT add.129 = f32[] add(Arg_0.127, Arg_1.128)
}
wide.wide.region_3.156.clone.clone {
wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated}
constant.34 = s32[] constant(1), sharding={replicated}
add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated}
get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1}
constant.35 = s32[] constant(3), sharding={replicated}
subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated}
constant.6..sunk.4 = s32[] constant(-1), sharding={replicated}
add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated}
constant.36 = s32[] constant(0), sharding={replicated}
compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated}
constant.26..sunk.1 = s32[] constant(2), sharding={replicated}
add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated}
select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated}
dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1}
multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1}
get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=7, sharding={devices=[1,1,2]0,1}
dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1}
dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1}
dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1}
dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1}
get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1}
dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1}
dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1}
get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1}
get-tuple-element.189 = f32[3,4,128]{2,1,0} get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1}
dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1}
reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1}
dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1}
dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1}
ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
}
wide.wide.region_4.218.clone.clone {
wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated}
constant.28 = s32[] constant(3)
ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT
}
ENTRY entry {
Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
constant.45 = s32[] constant(0), sharding={replicated}
constant.23 = f32[] constant(1), sharding={replicated}
broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1}
constant.21 = f32[] constant(0), sharding={replicated}
broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1}
Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1}
tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1}
get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1}
tuple.18 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
while.3 = (s32[], f3 | absl::Status EinsumHeightAnalysis::SetInstructionHeight(
    const HloInstruction* instruction, const ShapeTree<int>& height) {
  // Fetch (or lazily create) the height tree cached for `instruction`, then
  // apply `height` to it via SetHeight.  Always succeeds.
  // NOTE(review): the text before/after this function on its first and last
  // lines looks like extraction garbling from an adjacent document — confirm
  // against the original source file.
  ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction);
  SetHeight(height_tree, height);
  return absl::OkStatus();
} | TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) {
// HLO for a single matmul layer with ReLU: forward pass (dot.0, maximum.33),
// backward pass (dot.2), and an SGD-style weight update (add.93).  The
// sharding annotations describe a 2-way partitioned run.
const std::string module_str = R"(
HloModule OneMatmul

region_0.39 {
  Arg_0.40 = f32[] parameter(0)
  Arg_1.41 = f32[] parameter(1)
  ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1}
  maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1}
  compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1}
  select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
  dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.11 = f32[] constant(-0.01), sharding={replicated}
  broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1}
  add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1}
  reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated}
  ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}}
}
)";
  // Parse with counts 1 and 2 — presumably replica_count / num_partitions,
  // matching the 2-device shardings above; confirm against the test fixture.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  // The input parameter (and its copy) and the backward-pass product dot.2
  // should be labeled weights; the forward product dot.0 is an activation;
  // the ReLU mask select.35 (built only from compares/constants) is static.
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
}
// Verifies that a conditional whose branches return tuples (one branch going
// through an async start/done pair) is labeled as a tuple/token value rather
// than crashing or mislabeling the analysis.
TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) {
  const std::string module_str = R"(
HloModule Module

branch0 {
  tparam = f32[4] parameter(0)
  tgte1 = f32[4] ceil(tparam)
  ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1)
}

branch1 {
  fparam = f32[4] parameter(0)
  %async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
  %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
  ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done)
}

ENTRY entry {
  p0 = f32[4] parameter(0)
  b0 = s32[] parameter(1)
  ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0),
    branch_computations={branch0, branch1}
}
)";
  // Parse with counts 1 and 2 — presumably replica_count / num_partitions;
  // confirm against the test fixture.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  // The conditional's result is a tuple, so its semantic label should be
  // TupleOrToken.
  EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(),
                             "conditional"));
}
// Two stacked matmul layers (dot.0 then dot.1) with a ReLU at the top:
// checks that forward products are activations, the ReLU mask is static,
// and the gradient matmuls are labeled as expected.
TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) {
  const std::string module_str = R"(
HloModule TwoMatmuls

region_0.44 {
  Arg_0.45 = f32[] parameter(0)
  Arg_1.46 = f32[] parameter(1)
  ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1}
  dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1}
  maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1}
  compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1}
  select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1}
  dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
  dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.12 = f32[] constant(-0.01), sharding={replicated}
  broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1}
  add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1}
  dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1}
  multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1}
  add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1}
  reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated}
  ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}}
}
)";
  // Parse with counts 1 and 2 — presumably replica_count / num_partitions;
  // confirm against the test fixture.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  // Inputs and their copies must not be activations; both forward matmul
  // products (dot.0, dot.1) are activations.
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
  // The ReLU mask is static; dot.2 (mask x weight) is a weight; the two
  // gradient matmuls (dot.5, dot.6) are activations.
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6"));
}
TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) {
const std::string module_str = R"(
HloModule RepeatWhile
region_0.52 {
arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated}
constant.61 = s32[] constant(1), sharding={replicated}
add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated}
get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1}
constant.69 = s32[] constant(0), sharding={replicated}
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated}
constant.68 = s32[] constant(3), sharding={replicated}
add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated}
select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated}
dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1}
dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1}
dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.43 = f32[] constant(0), sharding={replicated}
broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1}
maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1}
get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1}
reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1}
compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), direction=EQ, sharding={devices=[2,1]0,1}
constant.42 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1}
select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1}
reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
}
region_1.107 {
arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated}
constant.116 = s32[] constant(3)
ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT
}
region_2.126 {
Arg_0.127 = f32[] parameter(0)
Arg_1.128 = f32[] parameter(1)
ROOT add.129 = f32[] add(Arg_0.127, Arg_1.128)
}
wide.wide.region_3.156.clone.clone {
wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated}
constant.34 = s32[] constant(1), sharding={replicated}
add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated}
get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1}
constant.35 = s32[] constant(3), sharding={replicated}
subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated}
constant.6..sunk.4 = s32[] constant(-1), sharding={replicated}
add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated}
constant.36 = s32[] constant(0), sharding={replicated}
compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated}
constant.26..sunk.1 = s32[] constant(2), sharding={replicated}
add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated}
select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated}
dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1}
multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1}
get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=7, sharding={devices=[1,1,2]0,1}
dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1}
dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1}
dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1}
dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1}
get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1}
dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1}
dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1}
get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1}
get-tuple-element.189 = f32[3,4,128]{2,1,0} get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1}
dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1}
reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1}
dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1}
dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1}
ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
}
wide.wide.region_4.218.clone.clone {
wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated}
constant.28 = s32[] constant(3)
ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT
}
ENTRY entry {
Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
constant.45 = s32[] constant(0), sharding={replicated}
constant.23 = f32[] constant(1), sharding={replicated}
broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1}
constant.21 = f32[] constant(0), sharding={replicated}
broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1}
Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1}
tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1}
get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1}
tuple.18 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
while.3 = (s32[], f3 |
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
// Returns OkStatus iff `dim` is a valid dimension number for a tensor of
// rank `dims`, i.e. 0 <= dim < dims.
Status ValidDim(int64_t dims, int64_t dim) {
  const bool in_range = (dim >= 0 && dim < dims);
  if (in_range) {
    return absl::OkStatus();
  }
  return InvalidArgument(
      "Each dimension number must be in region [0, rank). Given rank ", dims,
      " and dimension number value ", dim);
}
// Checks that `spatial_dimensions` has exactly rank - 2 entries and that each
// entry is a valid dimension number in [0, dims).
Status ValidSpatialDimensions(
    int64_t dims, const protobuf::RepeatedField<int64_t>& spatial_dimensions) {
  if (spatial_dimensions.size() != dims - 2) {
    return InvalidArgument(
        "Spatial dimensions size must be rank - 2. Given rank ", dims,
        " and spatial dimensions size ", spatial_dimensions.size());
  }
  for (const int64_t spatial_dim : spatial_dimensions) {
    TF_RETURN_IF_ERROR(ValidDim(dims, spatial_dim));
  }
  return absl::OkStatus();
}
}
// Loads the convolution attrs (strides, dilations, group counts, padding,
// dimension numbers) from a kernel-construction context.
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
    const OpKernelConstruction& context) {
  return LoadFromAttrsInternal(context);
}
// Same as above, but reads the attrs from a shape-inference context so shape
// functions can reuse the identical loading/validation logic.
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
    const shape_inference::InferenceContext& context) {
  return LoadFromAttrsInternal(context);
}
// Validates that `lhs_shape` (input) and `rhs_shape` (kernel) are mutually
// consistent with the loaded attrs, filling defaults for any attrs that were
// left empty (window_strides_, lhs_dilation_, rhs_dilation_,
// dimension_numbers_), then validating the feature/batch group counts and
// finally computing/validating the padding list.
//
// Returns InvalidArgument if any rank, dimension-number, stride, dilation, or
// group-count constraint is violated.
Status UniformQuantizedConvolutionParams::ValidateOrFillParamsAndValidateShape(
    const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
  if (lhs_shape.dims() != rhs_shape.dims()) {
    return InvalidArgument(
        "lhs and rhs must have same dims. Given lhs and rhs of shapes: ",
        lhs_shape.DebugString(), rhs_shape.DebugString());
  }
  const int64_t dims = lhs_shape.dims();
  if (dims <= 2) {
    return InvalidArgument("lhs and rhs shape dims must be at least 3. Given: ",
                           dims);
  }
  // Every dimension beyond batch and feature is a spatial dimension.
  const int64_t num_spatial_dims = dims - 2;
  if (window_strides_.empty()) {
    window_strides_.resize(num_spatial_dims, 1);
  } else if (window_strides_.size() != num_spatial_dims) {
    return InvalidArgument("Size of window_strides Attr must be dims - 2.");
  } else if (!absl::c_all_of(window_strides_,
                             [](int stride) { return stride >= 1; })) {
    return InvalidArgument(
        "All elements of window_strides must be >= 1. Given ",
        absl::StrJoin(window_strides_, ", "));
  }
  if (lhs_dilation_.empty()) {
    lhs_dilation_.resize(num_spatial_dims, 1);
  } else if (lhs_dilation_.size() != num_spatial_dims) {
    return InvalidArgument("Size of lhs_dilation Attr must be dims - 2.");
  } else if (!absl::c_all_of(lhs_dilation_, [](const int dilation) {
               return dilation >= 1;
             })) {
    return InvalidArgument("All elements of lhs_dilation must be >= 1. Given ",
                           absl::StrJoin(lhs_dilation_, ", "));
  }
  if (rhs_dilation_.empty()) {
    rhs_dilation_.resize(num_spatial_dims, 1);
  } else if (rhs_dilation_.size() != num_spatial_dims) {
    return InvalidArgument("Size of rhs_dilation Attr must be dims - 2.");
  } else if (!absl::c_all_of(rhs_dilation_, [](const int dilation) {
               return dilation >= 1;
             })) {
    return InvalidArgument("All elements of rhs_dilation must be >= 1. Given ",
                           absl::StrJoin(rhs_dilation_, ", "));
  }
  if (dimension_numbers_.input_spatial_dimensions_size() == 0) {
    // Empty dimension numbers: fill in the default layout
    // [batch, feature, spatial...] for input/output and
    // [out_feature, in_feature, spatial...] for the kernel.
    dimension_numbers_.set_input_batch_dimension(0);
    dimension_numbers_.set_input_feature_dimension(1);
    for (int64_t i = 0; i < num_spatial_dims; ++i) {
      dimension_numbers_.add_input_spatial_dimensions(2 + i);
    }
    dimension_numbers_.set_kernel_output_feature_dimension(0);
    dimension_numbers_.set_kernel_input_feature_dimension(1);
    for (int64_t i = 0; i < num_spatial_dims; ++i) {
      dimension_numbers_.add_kernel_spatial_dimensions(2 + i);
    }
    dimension_numbers_.set_output_batch_dimension(0);
    dimension_numbers_.set_output_feature_dimension(1);
    for (int64_t i = 0; i < num_spatial_dims; ++i) {
      dimension_numbers_.add_output_spatial_dimensions(2 + i);
    }
  } else {
    // User-supplied dimension numbers: every dimension number must be a
    // valid axis of the (equal-rank) operands.
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.input_batch_dimension()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.input_feature_dimension()));
    TF_RETURN_IF_ERROR(ValidSpatialDimensions(
        dims, dimension_numbers_.input_spatial_dimensions()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.kernel_input_feature_dimension()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.kernel_output_feature_dimension()));
    TF_RETURN_IF_ERROR(ValidSpatialDimensions(
        dims, dimension_numbers_.kernel_spatial_dimensions()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.output_batch_dimension()));
    // Fixed: the original code validated output_batch_dimension twice and
    // never validated output_feature_dimension.
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.output_feature_dimension()));
    TF_RETURN_IF_ERROR(ValidSpatialDimensions(
        dims, dimension_numbers_.output_spatial_dimensions()));
  }
  if (feature_group_count_ <= 0) {
    return InvalidArgument(
        "feature_group_count must be a positive integer, given: ",
        feature_group_count_);
  }
  // Grouped-convolution consistency:
  //   lhs_feature_count == feature_group_count * rhs_input_feature_count.
  const int64_t lhs_feature_count =
      lhs_shape.dim_size(dimension_numbers_.input_feature_dimension());
  if (lhs_feature_count % feature_group_count_) {
    return InvalidArgument(
        "feature_group_count must divide lhs feature dimension size, but ",
        feature_group_count_, " does not divide ", lhs_feature_count);
  }
  const int64_t rhs_input_feature_count =
      rhs_shape.dim_size(dimension_numbers_.kernel_input_feature_dimension());
  if (lhs_feature_count % rhs_input_feature_count) {
    return InvalidArgument(
        "rhs input feature dimension must divide lhs feature dimension "
        "size, but ",
        rhs_input_feature_count, " does not divide ", lhs_feature_count);
  }
  if (lhs_feature_count / feature_group_count_ != rhs_input_feature_count) {
    return InvalidArgument(
        "lhs feature dimension size divided by feature_group_count must equal "
        "the rhs input feature dimension size, but ",
        lhs_feature_count, " / ", feature_group_count_,
        " != ", rhs_input_feature_count);
  }
  const int64_t rhs_output_feature_count =
      rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
  if (rhs_output_feature_count % feature_group_count_) {
    return InvalidArgument(
        "rhs output dimension size must be a multiple of feature_group_count, "
        "but ",
        rhs_output_feature_count, " is not a multiple of ",
        feature_group_count_);
  }
  if (batch_group_count_ <= 0) {
    return InvalidArgument(
        "batch_group_count Attr must be a positive integer. Given: ",
        batch_group_count_);
  }
  const int64_t lhs_batch_count =
      lhs_shape.dim_size(dimension_numbers_.input_batch_dimension());
  if (lhs_batch_count % batch_group_count_) {
    return InvalidArgument(
        "batch_group_count must divide lhs batch dimension size, but ",
        batch_group_count_, " does not divide ", lhs_batch_count);
  }
  if (rhs_output_feature_count % batch_group_count_) {
    return InvalidArgument(
        "rhs output dimension size must be a multiple of batch_group_count, "
        "but ",
        rhs_output_feature_count, " is not a multiple of ", batch_group_count_);
  }
  // Finally fill (SAME/VALID) or validate (EXPLICIT) padding_list_.
  return ValidateOrFillPaddingList(lhs_shape, rhs_shape);
}
// Computes the convolution output shape from the (already validated) operand
// shapes, dilations, padding list, and window strides.
// Must be called after ValidateOrFillParamsAndValidateShape, since it indexes
// window_strides_/lhs_dilation_/rhs_dilation_/padding_list_ per spatial dim.
absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
    const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
  std::vector<int64_t> output_shape_buf(lhs_shape.dims());
  // Output batch = input batch / batch_group_count.
  output_shape_buf[dimension_numbers_.output_batch_dimension()] =
      lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) /
      batch_group_count_;
  // Output feature = kernel output-feature dimension size.
  output_shape_buf[dimension_numbers_.output_feature_dimension()] =
      rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
  for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) {
    const int64_t lhs_size_dilated = DilatedSize(
        lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
        lhs_dilation_[i]);
    const int64_t rhs_size_dilated = DilatedSize(
        rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
        rhs_dilation_[i]);
    // Number of valid window positions along this spatial dimension.
    const int64_t output_size_numerator =
        lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] -
        rhs_size_dilated + 1;
    const int64_t output_size_denominator = window_strides_[i];
    // Ceiling division: positions are taken every `stride` elements.
    output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] =
        (output_size_numerator + output_size_denominator - 1) /
        output_size_denominator;
  }
  TensorShape output_shape;
  TF_RETURN_IF_ERROR(
      TensorShape::BuildTensorShape(output_shape_buf, &output_shape));
  return output_shape;
}
// Shared implementation for both LoadFromAttrs overloads. ContextT is either
// OpKernelConstruction or shape_inference::InferenceContext; both expose the
// same GetAttr interface.
// Validates the padding mode string and that explicit_padding is only set
// when padding == "EXPLICIT"; parses the serialized dimension-numbers proto.
template <typename ContextT>
Status UniformQuantizedConvolutionParams::LoadFromAttrsInternal(
    const ContextT& context) {
  TF_RETURN_IF_ERROR(context.GetAttr("window_strides", &window_strides_));
  TF_RETURN_IF_ERROR(context.GetAttr("lhs_dilation", &lhs_dilation_));
  TF_RETURN_IF_ERROR(context.GetAttr("rhs_dilation", &rhs_dilation_));
  TF_RETURN_IF_ERROR(context.GetAttr("batch_group_count", &batch_group_count_));
  TF_RETURN_IF_ERROR(
      context.GetAttr("feature_group_count", &feature_group_count_));
  TF_RETURN_IF_ERROR(context.GetAttr("padding", &padding_));
  TF_RETURN_IF_ERROR(context.GetAttr("explicit_padding", &padding_list_));
  if (padding_ != "EXPLICIT" && padding_ != "SAME" && padding_ != "VALID") {
    return InvalidArgument(
        "padding Attr must be one of [EXPLICIT | SAME | VALID], but given: ",
        padding_);
  } else if (padding_ != "EXPLICIT" && !padding_list_.empty()) {
    // explicit_padding only makes sense with EXPLICIT padding.
    return InvalidArgument(
        "If padding Attr is not 'EXPLICIT', explicit_padding Attr must be "
        "empty. Given padding ",
        padding_, " and explicit_padding of size ", padding_list_.size());
  }
  std::string dimension_numbers_str;
  TF_RETURN_IF_ERROR(
      context.GetAttr("dimension_numbers", &dimension_numbers_str));
  if (dimension_numbers_str.empty()) {
    // Empty attr means "use defaults"; defaults are filled later in
    // ValidateOrFillParamsAndValidateShape.
    dimension_numbers_.Clear();
  } else if (!dimension_numbers_.ParseFromString(dimension_numbers_str)) {
    return InvalidArgument("Error parsing convolution dimension numbers.");
  }
  return absl::OkStatus();
}
// Validates padding_list_ for "EXPLICIT" padding, or fills it for
// "VALID" (all zeros) and "SAME" (TF-style symmetric-ish padding so that
// output size == ceil(input size / stride); any odd remainder goes to the
// end/high side of the dimension).
Status UniformQuantizedConvolutionParams::ValidateOrFillPaddingList(
    const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
  const int64_t dims = lhs_shape.dims();
  // Two entries (begin, end) per spatial dimension.
  const int64_t padding_list_size = 2 * (dims - 2);
  if (padding_ == "EXPLICIT") {
    if (padding_list_.size() != padding_list_size) {
      return InvalidArgument(
          "Size of explicit_padding Attr must be 2 * (rank - 2). Given rank ",
          dims, " and explicit_padding of size ", padding_list_.size());
    } else if (!absl::c_all_of(padding_list_,
                               [](int elem) { return elem >= 0; })) {
      return InvalidArgument("All explicit_padding elems must be >= 0, Given ",
                             absl::StrJoin(padding_list_, ", "));
    }
  } else if (padding_ == "VALID") {
    padding_list_.resize(padding_list_size, 0);
  } else {
    // padding_ == "SAME" (the only remaining value after attr validation).
    padding_list_.resize(padding_list_size);
    for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size();
         ++i) {
      const int64_t stride = window_strides_[i];
      const int64_t lhs_size_dilated = DilatedSize(
          lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
          lhs_dilation_[i]);
      const int64_t rhs_size_dilated = DilatedSize(
          rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
          rhs_dilation_[i]);
      // Target output size for SAME padding: ceil(input / stride).
      const int64_t output_size = (lhs_size_dilated + stride - 1) / stride;
      // Total padding needed to realize that output size (clamped at 0).
      const int64_t total_padding = std::max(
          (output_size - 1) * stride + rhs_size_dilated - lhs_size_dilated,
          static_cast<int64_t>(0));
      const int64_t padding_begin = total_padding / 2;
      const int64_t padding_end = total_padding - padding_begin;
      padding_list_[2 * i] = padding_begin;
      padding_list_[2 * i + 1] = padding_end;
    }
  }
  return absl::OkStatus();
}
} | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using protobuf::TextFormat;
using ::testing::ElementsAreArray;
// DilatedSize(d, dilation) expands d elements with (dilation - 1) gaps:
// 0 stays 0, otherwise (d - 1) * dilation + 1 (10 @ dilation 3 -> 28).
TEST(UniformQuantizedConvolutionParamsTest, DilatedSize) {
  EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(0, 2), 0);
  EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(10, 3), 28);
}
// With all attrs left empty, validation fills defaults: unit strides and
// dilations, zero VALID padding, and [batch, feature, spatial...] dimension
// numbers for input/output ([out_feat, in_feat, spatial...] for the kernel).
TEST(UniformQuantizedConvolutionParamsTest,
     ValidateOrFillParamsAndValidateShapeDefaultAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({},
                                           {},
                                           {},
                                           dimension_numbers,
                                           1,
                                           1,
                                           "VALID");
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape({2, 2, 3, 4},
                                                  {3, 2, 2, 3}));
  EXPECT_THAT(params.window_strides(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.lhs_dilation(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.rhs_dilation(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({0, 0, 0, 0}));
  EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
              ElementsAreArray({2, 3}));
  EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
              ElementsAreArray({2, 3}));
  EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
              ElementsAreArray({2, 3}));
}
// With explicitly supplied (NHWC-style) dimension numbers and EXPLICIT
// padding, validation must preserve the user-supplied values unchanged.
TEST(UniformQuantizedConvolutionParamsTest,
     ValidateOrFillParamsAndValidateShapeSetAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
                                            input_batch_dimension: 0
                                            input_feature_dimension: 3
                                            input_spatial_dimensions: 1
                                            input_spatial_dimensions: 2
                                            kernel_output_feature_dimension: 3
                                            kernel_input_feature_dimension: 2
                                            kernel_spatial_dimensions: 0
                                            kernel_spatial_dimensions: 1
                                            output_batch_dimension: 0
                                            output_feature_dimension: 3
                                            output_spatial_dimensions: 1
                                            output_spatial_dimensions: 2
                                          )pb",
                                          &dimension_numbers));
  UniformQuantizedConvolutionParams params({2, 2},
                                           {3, 3},
                                           {4, 4},
                                           dimension_numbers,
                                           2,
                                           1,
                                           "EXPLICIT",
                                           {1, 1, 2, 2});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape({2, 3, 4, 2},
                                                  {2, 3, 1, 2}));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 1, 2, 2}));
  EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 3);
  EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
              ElementsAreArray({1, 2}));
  EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 3);
  EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 2);
  EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
              ElementsAreArray({0, 1}));
  EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 3);
  EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
              ElementsAreArray({1, 2}));
}
// Default attrs, VALID padding: output is [batch, out_feature,
// in_spatial - kernel_spatial + 1 ...] = [2, 3, 2, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({},
                                           {},
                                           {},
                                           dimension_numbers,
                                           1,
                                           1,
                                           "VALID");
  const TensorShape lhs_shape({2, 2, 3, 4});
  const TensorShape rhs_shape({3, 2, 2, 3});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}
// Non-default dimension numbers, strides, dilations, and explicit padding:
// verifies the full output-size formula (dilated sizes + padding, ceil-div
// by stride) yields [2, 3, 3, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
                                            input_batch_dimension: 0
                                            input_feature_dimension: 3
                                            input_spatial_dimensions: 1
                                            input_spatial_dimensions: 2
                                            kernel_output_feature_dimension: 3
                                            kernel_input_feature_dimension: 2
                                            kernel_spatial_dimensions: 0
                                            kernel_spatial_dimensions: 1
                                            output_batch_dimension: 0
                                            output_feature_dimension: 3
                                            output_spatial_dimensions: 1
                                            output_spatial_dimensions: 2
                                          )pb",
                                          &dimension_numbers));
  UniformQuantizedConvolutionParams params({2, 2},
                                           {3, 3},
                                           {4, 4},
                                           dimension_numbers,
                                           2,
                                           1,
                                           "EXPLICIT",
                                           {1, 1, 2, 2});
  const TensorShape lhs_shape({2, 3, 4, 2});
  const TensorShape rhs_shape({2, 3, 1, 2});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}
// SAME padding: total padding per spatial dim keeps output == input size;
// an odd total (3 here for the first spatial dim) puts the extra element at
// the end, hence {1, 2, 1, 1}.
TEST(UniformQuantizedConvolutionParamsTest, CalculateSameOptionPadding) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({},
                                           {},
                                           {},
                                           dimension_numbers,
                                           1,
                                           1,
                                           "SAME");
  const TensorShape lhs_shape({2, 2, 3, 4});
  const TensorShape rhs_shape({3, 2, 4, 3});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 2, 1, 1}));
}
}
} | absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
std::vector<int64_t> output_shape_buf(lhs_shape.dims());
output_shape_buf[dimension_numbers_.output_batch_dimension()] =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) /
batch_group_count_;
output_shape_buf[dimension_numbers_.output_feature_dimension()] =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) {
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size_numerator =
lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] -
rhs_size_dilated + 1;
const int64_t output_size_denominator = window_strides_[i];
output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] =
(output_size_numerator + output_size_denominator - 1) /
output_size_denominator;
}
TensorShape output_shape;
TF_RETURN_IF_ERROR(
TensorShape::BuildTensorShape(output_shape_buf, &output_shape));
return output_shape;
} | TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 2, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
const TensorShape lhs_shape({2, 3, 4, 2});
const TensorShape rhs_shape({2, 3, 1, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
} |
#include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
// Converts the (buffer, status) pair returned by the C coordination-service
// GetKeyValue-style APIs into a StatusOr<std::string>. Takes ownership of
// `result_buf` on both paths; `status` is owned by the caller.
absl::StatusOr<std::string> ProcessGetKeyValueResult(TF_Buffer* result_buf,
                                                     TF_Status* status) {
  if (TF_GetCode(status) != TF_OK) {
    // Fixed: free any buffer the C API allocated on the error path too, so
    // it does not leak (guarded since the API may return nullptr on error).
    if (result_buf != nullptr) {
      TF_DeleteBuffer(result_buf);
    }
    return StatusFromTF_Status(status);
  }
  std::string result{static_cast<const char*>(result_buf->data),
                     result_buf->length};
  TF_DeleteBuffer(result_buf);
  return result;
}
}
// Inserts `key` -> `value` into the coordination service via the plugin
// C API; returns the status reported by the call.
Status CPluginCoordinationServiceAgent::InsertKeyValue(std::string_view key,
                                                       std::string_view value) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceInsertKeyValue(key.data(), key.size(), value.data(),
                                       value.size(), agent_, status);
  return StatusFromTF_Status(status);
}
// Blocking lookup of `key`; returns its value or the error status from the
// underlying C API call.
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf =
      TF_CoordinationServiceGetKeyValue(key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Lookup of `key` bounded by `timeout`.
// NOTE(review): absl::ToInt64Seconds truncates toward zero, so a sub-second
// timeout is passed to the C API as 0 seconds — confirm the C API treats 0
// as intended rather than as "no wait"/"infinite".
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key, absl::Duration timeout) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceGetKeyValueWithTimeout(
      key.data(), key.size(), absl::ToInt64Seconds(timeout), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Non-blocking lookup of `key`: returns immediately with the value or an
// error status instead of waiting for the key to appear.
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::TryGetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceTryGetKeyValue(
      key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Deletes `key` from the coordination service via the plugin C API.
// (The sibling unit test expects is_directory=true in the resulting request;
// that flag is presumably set inside the C API — not visible here.)
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
  return StatusFromTF_Status(status);
}
} | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/coordination_config.pb.h"
#include "tsl/protobuf/coordination_service.pb.h"
namespace tensorflow {
namespace {
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
using tsl::CallOptions;
using tsl::DeleteKeyValueRequest;
using tsl::DeleteKeyValueResponse;
using tsl::GetKeyValueRequest;
using tsl::GetKeyValueResponse;
using tsl::InsertKeyValueRequest;
using tsl::InsertKeyValueResponse;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::Pointee;
using ::testing::SetArgPointee;
using ::testing::WithArgs;
// gMock polymorphic matcher that compares protobuf messages by their
// DebugString() text representation (sufficient here since the test protos
// are built deterministically).
class ProtoStringMatcher {
 public:
  explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
      : expected_(expected.DebugString()) {}

  template <typename Message>
  bool MatchAndExplain(const Message& p,
                       ::testing::MatchResultListener*) const {
    return p.DebugString() == expected_;
  }

  void DescribeTo(std::ostream* os) const { *os << expected_; }
  void DescribeNegationTo(std::ostream* os) const {
    *os << "not equal to expected message: " << expected_;
  }

 private:
  // Text form of the expected proto, captured at construction time.
  const std::string expected_;
};
// Convenience factory wrapping ProtoStringMatcher in a gMock polymorphic
// matcher: EXPECT_THAT(actual_proto, EqualsProto(expected_proto)).
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
    const tsl::protobuf::Message& x) {
  return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
// Tuple matcher comparing two KeyValueEntry protos by key and value only
// (for use with container matchers like Pointwise).
MATCHER(KvEq, "simple KeyValueEntry matcher") {
  const KeyValueEntry& kv0 = std::get<0>(arg);
  const KeyValueEntry& kv1 = std::get<1>(arg);
  return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
// Mock CoordinationClient: the four key-value RPCs under test are gMock
// methods; every other RPC is stubbed to complete with UnimplementedError.
class TestCoordinationClient : public CoordinationClient {
 public:
  TestCoordinationClient() = default;

  // RPCs exercised by the tests below.
  MOCK_METHOD(void, GetKeyValueAsync,
              (CallOptions * call_opts, const GetKeyValueRequest*,
               GetKeyValueResponse*, StatusCallback),
              (override));
  MOCK_METHOD(void, TryGetKeyValueAsync,
              (const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, InsertKeyValueAsync,
              (const InsertKeyValueRequest*, InsertKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, DeleteKeyValueAsync,
              (const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
               StatusCallback),
              (override));

  // Remaining RPCs are not under test; they immediately report Unimplemented.
  void GetKeyValueDirAsync(const tsl::GetKeyValueDirRequest* request,
                           tsl::GetKeyValueDirResponse* response,
                           StatusCallback done) override {
    done(absl::UnimplementedError("GetKeyValueDirAsync"));
  }
  void ResetTaskAsync(const tsl::ResetTaskRequest* request,
                      tsl::ResetTaskResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("ResetTaskAsync"));
  }
  void ReportErrorToServiceAsync(
      const tsl::ReportErrorToServiceRequest* request,
      tsl::ReportErrorToServiceResponse* response,
      StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToServiceAsync"));
  }
  void BarrierAsync(const tsl::BarrierRequest* request,
                    tsl::BarrierResponse* response,
                    StatusCallback done) override {
    done(absl::UnimplementedError("BarrierAsync"));
  }
  void GetTaskStateAsync(const tsl::GetTaskStateRequest* request,
                         tsl::GetTaskStateResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("GetTaskStateAsync"));
  }
  void WaitForAllTasksAsync(const tsl::WaitForAllTasksRequest* request,
                            tsl::WaitForAllTasksResponse* response,
                            StatusCallback done) override {
    done(absl::UnimplementedError("WaitForAllTasksAsync"));
  }
  void CancelBarrierAsync(const tsl::CancelBarrierRequest* request,
                          tsl::CancelBarrierResponse* response,
                          StatusCallback done) override {
    done(absl::UnimplementedError("CancelBarrierAsync"));
  }
  void RegisterTaskAsync(tsl::CallOptions*,
                         const tsl::RegisterTaskRequest* request,
                         tsl::RegisterTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("RegisterTaskAsync"));
  }
  void ShutdownTaskAsync(tsl::CallOptions*,
                         const tsl::ShutdownTaskRequest* request,
                         tsl::ShutdownTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("ShutdownTaskAsync"));
  }
  void HeartbeatAsync(tsl::CallOptions*, const tsl::HeartbeatRequest* request,
                      tsl::HeartbeatResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("HeartbeatAsync"));
  }
  void ReportErrorToTaskAsync(CallOptions* call_opts,
                              const ReportErrorToTaskRequest* request,
                              ReportErrorToTaskResponse* response,
                              StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToTaskAsync"));
  }
};
// Test fixture wiring a real CoordinationServiceAgent (impl_) to the mock
// client, with the C-plugin wrapper (agent_) under test delegating to impl_.
class CPluginCoordinationServiceAgentTest : public ::testing::Test {
 public:
  // Initializes impl_ with the mock client. This moves client_ into impl_,
  // so set expectations via GetClient() BEFORE calling this.
  void InitializeAgent(CoordinationServiceConfig config = {}) {
    config.set_service_leader("test_leader");
    TF_ASSERT_OK(impl_->Initialize(
        tsl::Env::Default(), "test_job",
        0, config, std::move(client_),
        [](Status s) {
          LOG(ERROR) << "Coordination agent is set to error: " << s;
        }));
  }

  // Returns the mock client; CHECK-fails once client_ has been moved into
  // impl_ by InitializeAgent().
  TestCoordinationClient* GetClient() {
    CHECK(client_ != nullptr)
        << "GetClient() was called after InitializeAgent()";
    return client_.get();
  }

 protected:
  // Declaration order matters: agent_ is constructed from impl_.get().
  std::unique_ptr<CoordinationServiceAgent> impl_ =
      tsl::CreateCoordinationServiceAgent();
  std::unique_ptr<CPluginCoordinationServiceAgent> agent_ =
      std::make_unique<CPluginCoordinationServiceAgent>(impl_.get());
  std::unique_ptr<TestCoordinationClient> client_ =
      std::make_unique<TestCoordinationClient>();
};
// Blocking GetKeyValue returns the value from a successful RPC response.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key);
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
// Timed GetKeyValue also succeeds when the RPC completes within the timeout.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
// If the RPC never completes, the timed GetKeyValue reports
// DEADLINE_EXCEEDED. The captured callback is invoked afterwards to let the
// pending RPC finish cleanly before teardown.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
  const std::string test_key = "test_key";
  StatusCallback owned_done;
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(WithArgs<3>([&](StatusCallback done) {
        // Hold the callback instead of invoking it, so the call times out.
        owned_done = done;
      }));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
  EXPECT_EQ(result.status().code(), error::DEADLINE_EXCEEDED);
  owned_done(absl::CancelledError("error"));
}
// A zero timeout is rejected as INVALID_ARGUMENT before any RPC is issued
// (note: the agent is deliberately left uninitialized here).
TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_ZeroTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::ZeroDuration());
  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
// A negative timeout is likewise rejected as INVALID_ARGUMENT.
TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_NegativeTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(-1));
  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
// InsertKeyValue forwards key/value unchanged into the RPC request proto.
TEST_F(CPluginCoordinationServiceAgentTest, InsertKeyValue_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  InsertKeyValueRequest expected_input;
  auto kv = expected_input.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  EXPECT_CALL(*GetClient(),
              InsertKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));
  InitializeAgent();
  TF_ASSERT_OK(agent_->InsertKeyValue(test_key, test_value));
}
// DeleteKeyValue issues a recursive (is_directory=true) delete for the key.
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
  const std::string test_key = "test_x_key";
  DeleteKeyValueRequest expected_input;
  expected_input.set_key(test_key);
  expected_input.set_is_directory(true);
  EXPECT_CALL(*GetClient(),
              DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));
  InitializeAgent();
  TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}
// Non-blocking TryGetKeyValue returns the value from a successful response.
TEST_F(CPluginCoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
  const std::string& test_key = "test_key";
  const std::string& test_value = "test_value";
  TryGetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
      .WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
                           InvokeArgument<2>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->TryGetKeyValue(test_key);
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
}
} | Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
return StatusFromTF_Status(status);
} | TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
const std::string test_key = "test_x_key";
DeleteKeyValueRequest expected_input;
expected_input.set_key(test_key);
expected_input.set_is_directory(true);
EXPECT_CALL(*GetClient(),
DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
} |
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShardDatasetOp::kDatasetType;
constexpr const char* const ShardDatasetOp::kInputDataset;
constexpr const char* const ShardDatasetOp::kNumShards;
constexpr const char* const ShardDatasetOp::kIndex;
constexpr const char* const ShardDatasetOp::kRequireNonEmpty;
constexpr const char* const ShardDatasetOp::kOutputTypes;
constexpr const char* const ShardDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kNextIndex[] = "next_index";
constexpr char kFileShardErrorMessage[] =
"If you are using datasets with distribution strategy, consider setting "
"the auto sharding policy to either DATA or OFF using the "
"`experimental_distribute.auto_shard_policy` option of `tf.data.Options()`."
" Or, split your input files into a larger number of small files such that "
"number of files is greater than number of shards/workers.";
// Dataset that yields every `num_shards_`-th element of `input_`, starting at
// offset `index_` (elements index_, index_ + num_shards_, ...).  When
// `require_non_empty_` is true, iteration reports InvalidArgument if the input
// cannot supply at least one element per shard (FILE-based sharding check).
class ShardDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, int64_t num_shards, int64_t index,
          bool require_non_empty, const DatasetBase* input)
      : DatasetBase(DatasetContext(ctx)),
        num_shards_(num_shards),
        index_(index),
        input_(input),
        require_non_empty_(require_non_empty),
        traceme_metadata_(
            {{"index", strings::Printf("%lld", static_cast<long long>(index))},
             {"num_shards",
              strings::Printf("%lld", static_cast<long long>(num_shards))}}) {
    // `input_` is never null here: the kernel always passes a valid input and
    // Ref() dereferences it unconditionally.  The previous `input_ != nullptr`
    // check, performed *after* the Ref() call, was dead code and is removed.
    input_->Ref();
    random_indexing_compatible_ = input_->RandomIndexingCompatible();
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  // The output signature is inherited unchanged from the input dataset.
  const DataTypeVector& output_dtypes() const override {
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return input_->output_shapes();
  }
  string DebugString() const override {
    name_utils::DatasetDebugStringParams params;
    params.set_args(num_shards_, index_);
    return name_utils::DatasetDebugString(kDatasetType, params);
  }
  // This shard receives one extra element iff index_ < n % num_shards_.
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    int64_t n = input_->Cardinality(options);
    if (n == kInfiniteCardinality || n == kUnknownCardinality) {
      return n;
    }
    return n / num_shards_ + (index_ < n % num_shards_ ? 1 : 0);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
  // Random access: element `index` of this shard is input element
  // index_ + num_shards_ * index.
  Status Get(OpKernelContext* ctx, int64 index,
             std::vector<Tensor>* out_tensors) const override {
    TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
    return input_->Get(ctx, index_ + (num_shards_ * index), out_tensors);
  }
  absl::Status RandomIndexingCompatible() const override {
    return random_indexing_compatible_;
  }

 protected:
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* num_shards = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(num_shards_, &num_shards));
    Node* index = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(index_, &index));
    AttrValue require_non_empty_attr;
    b->BuildAttrValue(require_non_empty_, &require_non_empty_attr);
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_graph_node, num_shards, index},
                      {{kRequireNonEmpty, require_non_empty_attr}}, output));
    return absl::OkStatus();
  }

 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params), next_index_(0), element_count_(0) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status Initialize(IteratorContext* ctx) override {
      // kShardHint is a sentinel that auto-sharding is supposed to rewrite;
      // reaching here with it unresolved is a configuration error.
      if (dataset()->num_shards_ == kShardHint) {
        return errors::FailedPrecondition(
            "`tf.data.Dataset.shard(SHARD_HINT, ...)` can only be used in "
            "`tf.distribute.Strategy.experimental_distribute_dataset()` with "
            "`tf.data.experimental.AutoShardPolicy.HINT` policy, or tf.data "
            "service with "
            "`tf.data.experimental.service.ShardingPolicy.HINT` processing "
            "mode.");
      }
      return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      *end_of_sequence = false;
      if (!input_impl_) {
        *end_of_sequence = true;
        return absl::OkStatus();
      }
      // Under global shuffling, delegate to the random-access path.
      if (ctx->index_mapper() != nullptr) {
        return Get(ctx, out_tensors, end_of_sequence);
      }
      // Skip forward to the next input element belonging to this shard.
      // NOTE(review): `int` narrows from int64_t; safe while num_shards_ fits
      // in int — confirm if larger shard counts are ever allowed.
      int num_to_skip =
          (dataset()->index_ - next_index_) % dataset()->num_shards_;
      if (num_to_skip < 0) {
        num_to_skip += dataset()->num_shards_;
      }
      int num_skipped;
      TF_RETURN_IF_ERROR(
          input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
      next_index_ += num_skipped;
      if (*end_of_sequence) {
        input_impl_.reset();
        return absl::OkStatus();
      }
      std::vector<Tensor> result;
      TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
      if (*end_of_sequence) {
        input_impl_.reset();
        return absl::OkStatus();
      }
      next_index_++;
      // On the first element, verify the input has at least one element per
      // shard when FILE-based sharding demands it.
      if (dataset()->require_non_empty_ &&
          next_index_ < dataset()->num_shards_) {
        int num_skipped;
        Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
                                     end_of_sequence, &num_skipped);
        if (*end_of_sequence || errors::IsOutOfRange(s)) {
          return absl::InvalidArgumentError(absl::StrCat(
              "Could not apply FILE based sharding: the dataset only has ",
              next_index_, " file(s), which is not enough for the required ",
              dataset()->num_shards_, " shards/workers. ",
              kFileShardErrorMessage));
        } else if (!s.ok()) {
          return s;
        }
        next_index_ = dataset()->num_shards_;
      }
      *out_tensors = std::move(result);
      return absl::OkStatus();
    }
    // Random-access path: the index mapper installed by GetIndexMapper()
    // translates shard-local positions into input positions.
    Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
               bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
      auto merge_checkpoint = gtl::MakeCleanup([&ctx_with_index_mapper] {
        ctx_with_index_mapper.MergeCheckpoint();
      });
      TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
                                              out_tensors, end_of_sequence));
      if (*end_of_sequence && dataset()->require_non_empty_ &&
          element_count_ == 0) {
        return absl::InvalidArgumentError(absl::StrCat(
            "Could not apply FILE based sharding: The dataset does not have "
            "enough file(s) for the required ",
            dataset()->num_shards_, " shards/workers. ",
            kFileShardErrorMessage));
      }
      ++element_count_;
      return absl::OkStatus();
    }
    // Maps shard-local element positions to input positions:
    // input_pos = parent(pos) * num_shards + shard_index.
    IndexMapperFn GetIndexMapper(
        IndexMapperFn parent_index_mapper) const override {
      int64_t num_shards = dataset()->num_shards_;
      int64_t shard_index = dataset()->index_;
      return [parent_index_mapper, num_shards,
              shard_index](size_t element_position) -> absl::StatusOr<size_t> {
        TF_ASSIGN_OR_RETURN(size_t output_index,
                            parent_index_mapper(element_position));
        return output_index * num_shards + shard_index;
      };
    }

   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      // This iterator consumes num_shards_ input elements per output element.
      return model::MakeKnownRatioNode(
          std::move(args), 1.0 / static_cast<double>(dataset()->num_shards_));
    }
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(prefix(), kNextIndex, next_index_));
      }
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      // Global-shuffle restore path: only the element count is tracked.
      if (ctx->restored_element_count().has_value()) {
        element_count_ = *ctx->restored_element_count();
        return RestoreInput(ctx, reader, input_impl_);
      }
      int64_t input_empty;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
      if (!static_cast<bool>(input_empty)) {
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
        TF_RETURN_IF_ERROR(
            reader->ReadScalar(prefix(), kNextIndex, &next_index_));
      } else {
        input_impl_.reset();
      }
      return absl::OkStatus();
    }
    TraceMeMetadata GetTraceMeMetadata() const override {
      return dataset()->traceme_metadata_;
    }

   private:
    mutex mu_;
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    // Index of the next input element to consume (sequential path).
    int64_t next_index_ TF_GUARDED_BY(mu_);
    // Number of elements produced so far (random-access path).
    size_t element_count_ TF_GUARDED_BY(mu_);
  };
  const int64_t num_shards_;
  const int64_t index_;
  const DatasetBase* const input_;
  const bool require_non_empty_;
  const TraceMeMetadata traceme_metadata_;
  absl::Status random_indexing_compatible_;
};
// Reads the static `require_non_empty` attr; the num_shards/index arguments
// arrive as input tensors and are parsed in MakeDataset.
ShardDatasetOp::ShardDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kRequireNonEmpty, &require_non_empty_));
}
// Parses the `num_shards` and `index` scalar inputs, validates them, and
// constructs the Dataset.  The kShardHint sentinel bypasses validation here;
// if it survives to iterator Initialize() it is rejected there.
void ShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                 DatasetBase** output) {
  int64_t index = 0;
  int64_t num_shards = 0;
  OP_REQUIRES_OK(ctx,
                 ParseScalarArgument<int64_t>(ctx, kNumShards, &num_shards));
  OP_REQUIRES(
      ctx, num_shards > 0 || num_shards == kShardHint,
      errors::InvalidArgument("Number of shards must be greater than zero "
                              "(currently num_shards = ",
                              num_shards, ")."));
  OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kIndex, &index));
  OP_REQUIRES(
      ctx, (index >= 0 && index < num_shards) || num_shards == kShardHint,
      errors::InvalidArgument("Index must be between 0 and ", num_shards - 1,
                              " (currently index = ", index, ")."));
  *output = new Dataset(ctx, num_shards, index, require_non_empty_, input);
}
namespace {
// Registers the CPU kernel for the "ShardDataset" op.
REGISTER_KERNEL_BUILDER(Name("ShardDataset").Device(DEVICE_CPU),
                        ShardDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
// Node name used for every ShardDataset instance under test.
constexpr char kNodeName[] = "shard_dataset";
// Parameter bundle describing one ShardDataset op instance: the input
// dataset, the num_shards/index scalar inputs, and the require_non_empty attr.
class ShardDatasetParams : public DatasetParams {
 public:
  template <typename T>
  ShardDatasetParams(T input_dataset_params, int64_t num_shards, int64_t index,
                     bool require_non_empty, DataTypeVector output_dtypes,
                     std::vector<PartialTensorShape> output_shapes,
                     string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        num_shards_(num_shards),
        index_(index),
        require_non_empty_(require_non_empty) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // num_shards and index are fed to the op as scalar input tensors.
  std::vector<Tensor> GetInputTensors() const override {
    return CreateTensors<int64_t>(TensorShape({}), {{num_shards_}, {index_}});
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->emplace_back(ShardDatasetOp::kInputDataset);
    input_names->emplace_back(ShardDatasetOp::kNumShards);
    input_names->emplace_back(ShardDatasetOp::kIndex);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    attr_vector->clear();
    attr_vector->emplace_back("require_non_empty", require_non_empty_);
    attr_vector->emplace_back("output_types", output_dtypes_);
    attr_vector->emplace_back("output_shapes", output_shapes_);
    attr_vector->emplace_back("metadata", "");
    return absl::OkStatus();
  }
  string dataset_type() const override { return ShardDatasetOp::kDatasetType; }
 private:
  int64_t num_shards_;
  int64_t index_;
  bool require_non_empty_;
};
// Empty fixture; all helpers come from DatasetOpsTestBase.
class ShardDatasetOpTest : public DatasetOpsTestBase {};
// Shard 2 of 5 over range(0, 10).
ShardDatasetParams ShardDatasetParams1() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            5,
                            2,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 0 of 5 over range(0, 10).
ShardDatasetParams ShardDatasetParams2() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            5,
                            0,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 2 of 5 over range(0, 1): produces no elements.
ShardDatasetParams ShardDatasetParams3() {
  return ShardDatasetParams(RangeDatasetParams(0, 1, 1),
                            5,
                            2,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 5 of 7 over range(0, 10).
ShardDatasetParams ShardDatasetParams4() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            7,
                            5,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 4 of 5 over range(0, 10).
ShardDatasetParams ShardDatasetParams5() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            5,
                            4,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 3 of 4 over range(0, 10).
ShardDatasetParams ShardDatasetParams6() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            4,
                            3,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Shard 5 of 20 over range(0, 10) with require_non_empty=false.
ShardDatasetParams ShardDatasetParams7() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            20,
                            5,
                            false,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// 20 shards over only 10 elements with require_non_empty=true: expected to
// fail with InvalidArgument when iterated.
ShardDatasetParams InvalidShardDatasetParamsWithNoElemForEachShard() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            20,
                            5,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// index (7) >= num_shards (5).
ShardDatasetParams InvalidShardDatasetParams1() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            5,
                            7,
                            false,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Negative index.
ShardDatasetParams InvalidShardDatasetParams2() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            5,
                            -3,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Negative num_shards.
ShardDatasetParams InvalidShardDatasetParams3() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            -3,
                            1,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Zero num_shards.
ShardDatasetParams InvalidShardDatasetParams4() {
  return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
                            0,
                            1,
                            true,
                            {DT_INT64},
                            {PartialTensorShape({})},
                            kNodeName);
}
// Expected per-shard output for each parameter set above.
std::vector<GetNextTestCase<ShardDatasetParams>> GetNextTestCases() {
  return {
      {ShardDatasetParams1(),
       CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
      {ShardDatasetParams2(),
       CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
      {ShardDatasetParams3(),
       {}},
      {ShardDatasetParams4(),
       CreateTensors<int64_t>(TensorShape{}, {{5}})},
      {ShardDatasetParams5(),
       CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
      {ShardDatasetParams6(),
       CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
      {ShardDatasetParams7(),
       CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
                         GetNextTestCases())
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ShardDatasetOpTest, DatasetTypeString) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(
      CheckDatasetTypeString(name_utils::OpName(ShardDatasetOp::kDatasetType)));
}
TEST_F(ShardDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
// Expected cardinality for each parameter set.
std::vector<CardinalityTestCase<ShardDatasetParams>> CardinalityTestCases() {
  return {{ShardDatasetParams1(),
           2},
          {ShardDatasetParams2(),
           2},
          {ShardDatasetParams3(),
           0},
          {ShardDatasetParams4(),
           1},
          {ShardDatasetParams5(),
           2},
          {ShardDatasetParams6(),
           2},
          {ShardDatasetParams7(),
           1}};
}
DATASET_CARDINALITY_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
                           CardinalityTestCases())
TEST_F(ShardDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, IteratorOutputShapes) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ShardDatasetOpTest, IteratorPrefix) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      ShardDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
// Save/restore breakpoints and expected output for each parameter set.
std::vector<IteratorSaveAndRestoreTestCase<ShardDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {
      {ShardDatasetParams1(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
      {ShardDatasetParams2(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
      {ShardDatasetParams3(),
       {0, 1},
       {}},
      {ShardDatasetParams4(),
       {0, 5},
       CreateTensors<int64_t>(TensorShape{}, {{5}})},
      {ShardDatasetParams5(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
      {ShardDatasetParams6(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
      {ShardDatasetParams7(),
       {0, 5},
       CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// With require_non_empty and more shards than input elements, GetNext must
// fail with InvalidArgument (not silently yield an empty shard).
TEST_F(ShardDatasetOpTest, NoElemForEachShard) {
  auto dataset_params = InvalidShardDatasetParamsWithNoElemForEachShard();
  TF_ASSERT_OK(Initialize(dataset_params));
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  EXPECT_EQ(
      iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
          .code(),
      absl::StatusCode::kInvalidArgument);
}
// Out-of-range index and non-positive num_shards are rejected at dataset
// construction time.
TEST_F(ShardDatasetOpTest, InvalidArguments) {
  std::vector<ShardDatasetParams> invalid_dataset_params = {
      InvalidShardDatasetParams1(), InvalidShardDatasetParams2(),
      InvalidShardDatasetParams3(), InvalidShardDatasetParams4()};
  for (const auto& dataset_params : invalid_dataset_params) {
    EXPECT_EQ(Initialize(dataset_params).code(),
              absl::StatusCode::kInvalidArgument);
  }
}
}
}
} | const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
} | TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
} |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_WRAPPER_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_WRAPPER_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class DoubleWrapperType;
class DoubleWrapperTypeView;
// CEL type corresponding to the well-known protobuf wrapper
// `google.protobuf.DoubleValue`.  Stateless: every instance is equivalent.
class DoubleWrapperType final {
 public:
  using view_alternative_type = DoubleWrapperTypeView;
  static constexpr TypeKind kKind = TypeKind::kDoubleWrapper;
  static constexpr absl::string_view kName = "google.protobuf.DoubleValue";
  // Converting constructor from the view form; defined after the view class.
  explicit DoubleWrapperType(DoubleWrapperTypeView);
  DoubleWrapperType() = default;
  DoubleWrapperType(const DoubleWrapperType&) = default;
  DoubleWrapperType(DoubleWrapperType&&) = default;
  DoubleWrapperType& operator=(const DoubleWrapperType&) = default;
  DoubleWrapperType& operator=(DoubleWrapperType&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return kName;
  }
  // Wrapper types are not parameterized, so this is always empty.
  absl::Span<const Type> parameters() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {};
  }
  std::string DebugString() const { return std::string(name()); }
  // No state to exchange.
  constexpr void swap(DoubleWrapperType&) noexcept {}
};
inline constexpr void swap(DoubleWrapperType& lhs,
                           DoubleWrapperType& rhs) noexcept {
  lhs.swap(rhs);
}
// All DoubleWrapperType instances are identical, so equality is trivially
// true and hashing contributes no state.
inline constexpr bool operator==(DoubleWrapperType, DoubleWrapperType) {
  return true;
}
inline constexpr bool operator!=(DoubleWrapperType lhs, DoubleWrapperType rhs) {
  return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, DoubleWrapperType) {
  return std::move(state);
}
// Streams the debug form, i.e. "google.protobuf.DoubleValue".
inline std::ostream& operator<<(std::ostream& out,
                                const DoubleWrapperType& type) {
  return out << type.DebugString();
}
// Trivially-copyable view form of DoubleWrapperType.  Since the type carries
// no state, the view stores nothing and ignores the viewed object.
class DoubleWrapperTypeView final {
 public:
  using alternative_type = DoubleWrapperType;
  static constexpr TypeKind kKind = DoubleWrapperType::kKind;
  static constexpr absl::string_view kName = DoubleWrapperType::kName;
  // Implicit conversion from the owning type; the argument is unused because
  // there is no state to reference.
  DoubleWrapperTypeView(
      const DoubleWrapperType& type ABSL_ATTRIBUTE_LIFETIME_BOUND
          ABSL_ATTRIBUTE_UNUSED) noexcept {}
  DoubleWrapperTypeView& operator=(
      const DoubleWrapperType& type ABSL_ATTRIBUTE_LIFETIME_BOUND
          ABSL_ATTRIBUTE_UNUSED) {
    return *this;
  }
  // Assignment from a temporary owner is disallowed.
  DoubleWrapperTypeView& operator=(DoubleWrapperType&&) = delete;
  DoubleWrapperTypeView() = default;
  DoubleWrapperTypeView(const DoubleWrapperTypeView&) = default;
  DoubleWrapperTypeView(DoubleWrapperTypeView&&) = default;
  DoubleWrapperTypeView& operator=(const DoubleWrapperTypeView&) = default;
  DoubleWrapperTypeView& operator=(DoubleWrapperTypeView&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const { return kName; }
  // Wrapper types are not parameterized, so this is always empty.
  absl::Span<const Type> parameters() const { return {}; }
  std::string DebugString() const { return std::string(name()); }
  // No state to exchange.
  constexpr void swap(DoubleWrapperTypeView&) noexcept {}
};
inline constexpr void swap(DoubleWrapperTypeView& lhs,
                           DoubleWrapperTypeView& rhs) noexcept {
  lhs.swap(rhs);
}
// All views are identical, so equality is trivially true and hashing
// contributes no state.
inline constexpr bool operator==(DoubleWrapperTypeView, DoubleWrapperTypeView) {
  return true;
}
inline constexpr bool operator!=(DoubleWrapperTypeView lhs,
                                 DoubleWrapperTypeView rhs) {
  return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, DoubleWrapperTypeView) {
  return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, DoubleWrapperTypeView type) {
  return out << type.DebugString();
}
// Out-of-line definition of the view-to-owner conversion; nothing to copy.
inline DoubleWrapperType::DoubleWrapperType(DoubleWrapperTypeView) {}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
// The tests below exercise DoubleWrapperType both directly and through the
// type-erased `Type` wrapper: kind/name/debug output, hashing, equality,
// native type id, and the InstanceOf/Cast/As helpers.
TEST(DoubleWrapperType, Kind) {
  EXPECT_EQ(DoubleWrapperType().kind(), DoubleWrapperType::kKind);
  EXPECT_EQ(Type(DoubleWrapperType()).kind(), DoubleWrapperType::kKind);
}
TEST(DoubleWrapperType, Name) {
  EXPECT_EQ(DoubleWrapperType().name(), DoubleWrapperType::kName);
  EXPECT_EQ(Type(DoubleWrapperType()).name(), DoubleWrapperType::kName);
}
TEST(DoubleWrapperType, DebugString) {
  {
    std::ostringstream out;
    out << DoubleWrapperType();
    EXPECT_EQ(out.str(), DoubleWrapperType::kName);
  }
  {
    std::ostringstream out;
    out << Type(DoubleWrapperType());
    EXPECT_EQ(out.str(), DoubleWrapperType::kName);
  }
}
TEST(DoubleWrapperType, Hash) {
  EXPECT_EQ(absl::HashOf(DoubleWrapperType()),
            absl::HashOf(DoubleWrapperType()));
}
TEST(DoubleWrapperType, Equal) {
  EXPECT_EQ(DoubleWrapperType(), DoubleWrapperType());
  EXPECT_EQ(Type(DoubleWrapperType()), DoubleWrapperType());
  EXPECT_EQ(DoubleWrapperType(), Type(DoubleWrapperType()));
  EXPECT_EQ(Type(DoubleWrapperType()), Type(DoubleWrapperType()));
}
TEST(DoubleWrapperType, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(DoubleWrapperType()),
            NativeTypeId::For<DoubleWrapperType>());
  EXPECT_EQ(NativeTypeId::Of(Type(DoubleWrapperType())),
            NativeTypeId::For<DoubleWrapperType>());
}
TEST(DoubleWrapperType, InstanceOf) {
  EXPECT_TRUE(InstanceOf<DoubleWrapperType>(DoubleWrapperType()));
  EXPECT_TRUE(InstanceOf<DoubleWrapperType>(Type(DoubleWrapperType())));
}
TEST(DoubleWrapperType, Cast) {
  EXPECT_THAT(Cast<DoubleWrapperType>(DoubleWrapperType()),
              An<DoubleWrapperType>());
  EXPECT_THAT(Cast<DoubleWrapperType>(Type(DoubleWrapperType())),
              An<DoubleWrapperType>());
}
TEST(DoubleWrapperType, As) {
  EXPECT_THAT(As<DoubleWrapperType>(DoubleWrapperType()), Ne(absl::nullopt));
  EXPECT_THAT(As<DoubleWrapperType>(Type(DoubleWrapperType())),
              Ne(absl::nullopt));
}
// Same coverage for the view form, plus cross-checks that views and owners
// hash and compare equal to each other.
TEST(DoubleWrapperTypeView, Kind) {
  EXPECT_EQ(DoubleWrapperTypeView().kind(), DoubleWrapperTypeView::kKind);
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()).kind(),
            DoubleWrapperTypeView::kKind);
}
TEST(DoubleWrapperTypeView, Name) {
  EXPECT_EQ(DoubleWrapperTypeView().name(), DoubleWrapperTypeView::kName);
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()).name(),
            DoubleWrapperTypeView::kName);
}
TEST(DoubleWrapperTypeView, DebugString) {
  {
    std::ostringstream out;
    out << DoubleWrapperTypeView();
    EXPECT_EQ(out.str(), DoubleWrapperTypeView::kName);
  }
  {
    std::ostringstream out;
    out << TypeView(DoubleWrapperTypeView());
    EXPECT_EQ(out.str(), DoubleWrapperTypeView::kName);
  }
}
TEST(DoubleWrapperTypeView, Hash) {
  EXPECT_EQ(absl::HashOf(DoubleWrapperTypeView()),
            absl::HashOf(DoubleWrapperTypeView()));
  EXPECT_EQ(absl::HashOf(DoubleWrapperTypeView()),
            absl::HashOf(DoubleWrapperType()));
}
TEST(DoubleWrapperTypeView, Equal) {
  EXPECT_EQ(DoubleWrapperTypeView(), DoubleWrapperTypeView());
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()), DoubleWrapperTypeView());
  EXPECT_EQ(DoubleWrapperTypeView(), TypeView(DoubleWrapperTypeView()));
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()),
            TypeView(DoubleWrapperTypeView()));
  EXPECT_EQ(DoubleWrapperTypeView(), DoubleWrapperType());
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()), DoubleWrapperType());
  EXPECT_EQ(TypeView(DoubleWrapperTypeView()), Type(DoubleWrapperType()));
  EXPECT_EQ(DoubleWrapperType(), DoubleWrapperTypeView());
  EXPECT_EQ(DoubleWrapperType(), DoubleWrapperTypeView());
  EXPECT_EQ(DoubleWrapperType(), TypeView(DoubleWrapperTypeView()));
  EXPECT_EQ(Type(DoubleWrapperType()), TypeView(DoubleWrapperTypeView()));
  EXPECT_EQ(DoubleWrapperTypeView(), DoubleWrapperType());
}
TEST(DoubleWrapperTypeView, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(DoubleWrapperTypeView()),
            NativeTypeId::For<DoubleWrapperTypeView>());
  EXPECT_EQ(NativeTypeId::Of(TypeView(DoubleWrapperTypeView())),
            NativeTypeId::For<DoubleWrapperTypeView>());
}
TEST(DoubleWrapperTypeView, InstanceOf) {
  EXPECT_TRUE(InstanceOf<DoubleWrapperTypeView>(DoubleWrapperTypeView()));
  EXPECT_TRUE(
      InstanceOf<DoubleWrapperTypeView>(TypeView(DoubleWrapperTypeView())));
}
TEST(DoubleWrapperTypeView, Cast) {
  EXPECT_THAT(Cast<DoubleWrapperTypeView>(DoubleWrapperTypeView()),
              An<DoubleWrapperTypeView>());
  EXPECT_THAT(Cast<DoubleWrapperTypeView>(TypeView(DoubleWrapperTypeView())),
              An<DoubleWrapperTypeView>());
}
TEST(DoubleWrapperTypeView, As) {
  EXPECT_THAT(As<DoubleWrapperTypeView>(DoubleWrapperTypeView()),
              Ne(absl::nullopt));
  EXPECT_THAT(As<DoubleWrapperTypeView>(TypeView(DoubleWrapperTypeView())),
              Ne(absl::nullopt));
}
}
} | inline std::ostream& operator<<(std::ostream& out,
const DoubleWrapperType& type) {
return out << type.DebugString();
} | TEST(DoubleWrapperType, DebugString) {
{
std::ostringstream out;
out << DoubleWrapperType();
EXPECT_EQ(out.str(), DoubleWrapperType::kName);
}
{
std::ostringstream out;
out << Type(DoubleWrapperType());
EXPECT_EQ(out.str(), DoubleWrapperType::kName);
}
} |
#include "quiche/common/quiche_random.h"
#include <cstdint>
#include <cstring>
#include "openssl/rand.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
// Seeds one 64-bit word of the per-thread xoshiro256++ state from the
// cryptographically secure RNG.
// NOTE(review): RAND_bytes' return value is ignored; BoringSSL's RAND_bytes
// aborts rather than fail, so this is safe there — confirm if ever built
// against a different libcrypto.
inline uint64_t Xoshiro256InitializeRngStateMember() {
  uint64_t result;
  RAND_bytes(reinterpret_cast<uint8_t*>(&result), sizeof(result));
  return result;
}
// Rotates `x` left by `k` bits (callers pass 1 <= k <= 63).
inline uint64_t Xoshiro256PlusPlusRotLeft(uint64_t x, int k) {
  const uint64_t wrapped = x >> (64 - k);
  const uint64_t shifted = x << k;
  return shifted | wrapped;
}
// xoshiro256++ pseudo-random generator over lazily seeded, thread-local
// state.  NOT cryptographically secure; backs only the InsecureRand* APIs.
uint64_t Xoshiro256PlusPlus() {
  // Each thread seeds its own 256-bit state from the secure RNG on first use.
  static thread_local uint64_t rng_state[4] = {
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember()};
  const uint64_t result =
      Xoshiro256PlusPlusRotLeft(rng_state[0] + rng_state[3], 23) + rng_state[0];
  // State transition of the xoshiro256 family (xor-shift-rotate updates).
  const uint64_t t = rng_state[1] << 17;
  rng_state[2] ^= rng_state[0];
  rng_state[3] ^= rng_state[1];
  rng_state[1] ^= rng_state[2];
  rng_state[0] ^= rng_state[3];
  rng_state[2] ^= t;
  rng_state[3] = Xoshiro256PlusPlusRotLeft(rng_state[3], 45);
  return result;
}
// Default QuicheRandom implementation: secure bytes come from RAND_bytes;
// "insecure" bytes come from the fast per-thread xoshiro256++ stream above.
class DefaultQuicheRandom : public QuicheRandom {
 public:
  DefaultQuicheRandom() {}
  DefaultQuicheRandom(const DefaultQuicheRandom&) = delete;
  DefaultQuicheRandom& operator=(const DefaultQuicheRandom&) = delete;
  ~DefaultQuicheRandom() override {}
  void RandBytes(void* data, size_t len) override;
  uint64_t RandUint64() override;
  void InsecureRandBytes(void* data, size_t len) override;
  uint64_t InsecureRandUint64() override;
};
// Fills `data` with `len` cryptographically secure random bytes.
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
  RAND_bytes(reinterpret_cast<uint8_t*>(data), len);
}
uint64_t DefaultQuicheRandom::RandUint64() {
  uint64_t value;
  RandBytes(&value, sizeof(value));
  return value;
}
// Fills `data` eight bytes at a time from the xoshiro256++ stream; a trailing
// partial word takes the first `len % 8` bytes of one extra draw.
void DefaultQuicheRandom::InsecureRandBytes(void* data, size_t len) {
  while (len >= sizeof(uint64_t)) {
    uint64_t random_bytes64 = Xoshiro256PlusPlus();
    memcpy(data, &random_bytes64, sizeof(uint64_t));
    data = reinterpret_cast<char*>(data) + sizeof(uint64_t);
    len -= sizeof(uint64_t);
  }
  if (len > 0) {
    QUICHE_DCHECK_LT(len, sizeof(uint64_t));
    uint64_t random_bytes64 = Xoshiro256PlusPlus();
    memcpy(data, &random_bytes64, len);
  }
}
uint64_t DefaultQuicheRandom::InsecureRandUint64() {
  return Xoshiro256PlusPlus();
}
}
// Returns the process-wide singleton.  Intentionally heap-allocated and never
// destroyed to avoid shutdown-order issues.
QuicheRandom* QuicheRandom::GetInstance() {
  static DefaultQuicheRandom* random = new DefaultQuicheRandom();
  return random;
}
} | #include "quiche/common/quiche_random.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
// RandBytes should overwrite a known fill pattern with different bytes.
// (Probabilistic: a 16-byte collision with the pattern is negligible.)
TEST(QuicheRandom, RandBytes) {
  unsigned char buf1[16];
  unsigned char buf2[16];
  memset(buf1, 0xaf, sizeof(buf1));
  memset(buf2, 0xaf, sizeof(buf2));
  ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
  auto rng = QuicheRandom::GetInstance();
  rng->RandBytes(buf1, sizeof(buf1));
  EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
// Two consecutive 64-bit draws should (almost surely) differ.
TEST(QuicheRandom, RandUint64) {
  auto rng = QuicheRandom::GetInstance();
  uint64_t value1 = rng->RandUint64();
  uint64_t value2 = rng->RandUint64();
  EXPECT_NE(value1, value2);
}
// Same checks for the insecure (xoshiro256++-backed) variants.
TEST(QuicheRandom, InsecureRandBytes) {
  unsigned char buf1[16];
  unsigned char buf2[16];
  memset(buf1, 0xaf, sizeof(buf1));
  memset(buf2, 0xaf, sizeof(buf2));
  ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
  auto rng = QuicheRandom::GetInstance();
  rng->InsecureRandBytes(buf1, sizeof(buf1));
  EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
TEST(QuicheRandom, InsecureRandUint64) {
  auto rng = QuicheRandom::GetInstance();
  uint64_t value1 = rng->InsecureRandUint64();
  uint64_t value2 = rng->InsecureRandUint64();
  EXPECT_NE(value1, value2);
}
}
} | uint64_t RandUint64() override;
void InsecureRandBytes(void* data, size_t len) override;
uint64_t InsecureRandUint64() override;
};
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
RAND_bytes(reinterpret_cast<uint8_t*>(data), len);
} | TEST(QuicheRandom, RandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->RandUint64();
uint64_t value2 = rng->RandUint64();
EXPECT_NE(value1, value2);
} |
#include "tsl/lib/io/buffered_inputstream.h"
#include "absl/status/status.h"
#include "tsl/lib/io/random_inputstream.h"
namespace tsl {
namespace io {
// Wraps `input_stream` with a read buffer of `buffer_bytes`.  When
// `owns_input_stream` is true, the wrapped stream is deleted in the dtor.
BufferedInputStream::BufferedInputStream(InputStreamInterface* input_stream,
                                         size_t buffer_bytes,
                                         bool owns_input_stream)
    : input_stream_(input_stream),
      size_(buffer_bytes),
      owns_input_stream_(owns_input_stream) {
  buf_.reserve(size_);
}
// Convenience overload: buffers reads from `file` via an owned
// RandomAccessInputStream.
BufferedInputStream::BufferedInputStream(RandomAccessFile* file,
                                         size_t buffer_bytes)
    : BufferedInputStream(new RandomAccessInputStream(file), buffer_bytes,
                          true) {}
BufferedInputStream::~BufferedInputStream() {
  if (owns_input_stream_) {
    delete input_stream_;
  }
}
// Refills buf_ from the underlying stream and resets pos_/limit_.  Once a
// read has failed, the cached file_status_ is returned on every later call
// and the buffer stays empty.  Note a failed read may still deliver partial
// data (limit_ > 0), which callers are expected to consume.
absl::Status BufferedInputStream::FillBuffer() {
  if (!file_status_.ok()) {
    pos_ = 0;
    limit_ = 0;
    return file_status_;
  }
  absl::Status s = input_stream_->ReadNBytes(size_, &buf_);
  pos_ = 0;
  limit_ = buf_.size();
  if (!s.ok()) {
    // Remember the failure (typically OutOfRange at EOF) for future reads.
    file_status_ = s;
  }
  return s;
}
// Reads characters up to the next '\n' (or EOF) into *result.  '\r'
// characters are dropped (so CRLF endings are handled) by flushing the
// pending range and restarting it just past the '\r'.  When include_eol is
// true the terminating '\n' is appended too.  Returns OK for a complete
// line, or for a final unterminated line at EOF; otherwise the stream error.
template <typename StringType>
absl::Status BufferedInputStream::ReadLineHelper(StringType* result,
                                                 bool include_eol) {
  result->clear();
  absl::Status s;
  size_t start_pos = pos_;
  while (true) {
    if (pos_ == limit_) {
      // Flush the scanned-but-unappended tail of the old buffer, then refill.
      result->append(buf_.data() + start_pos, pos_ - start_pos);
      s = FillBuffer();
      if (limit_ == 0) {
        break;
      }
      start_pos = pos_;
    }
    char c = buf_[pos_];
    if (c == '\n') {
      result->append(buf_.data() + start_pos, pos_ - start_pos);
      if (include_eol) {
        result->append(1, c);
      }
      pos_++;
      return absl::OkStatus();
    }
    if (c == '\r') {
      // Drop the '\r': append everything before it, restart after it.
      result->append(buf_.data() + start_pos, pos_ - start_pos);
      start_pos = pos_ + 1;
    }
    pos_++;
  }
  if (absl::IsOutOfRange(s) && !result->empty()) {
    // EOF after reading some characters: report the partial line as success.
    return absl::OkStatus();
  }
  return s;
}
// Reads up to `bytes_to_read` bytes into *result, refilling the buffer as
// needed.  Returns OK when the full count was delivered (even if EOF follows
// immediately); otherwise the stream error.  A cached failure is returned
// without touching the stream once the buffer is exhausted.
absl::Status BufferedInputStream::ReadNBytes(int64_t bytes_to_read,
                                             tstring* result) {
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Can't read a negative number of bytes: ",
                                   bytes_to_read);
  }
  result->clear();
  // Fail fast on a previously recorded error when no buffered data remains.
  if (pos_ == limit_ && !file_status_.ok() && bytes_to_read > 0) {
    return file_status_;
  }
  result->reserve(bytes_to_read);
  absl::Status s;
  while (result->size() < static_cast<size_t>(bytes_to_read)) {
    if (pos_ == limit_) {
      s = FillBuffer();
      if (limit_ == 0) {
        DCHECK(!s.ok());
        file_status_ = s;
        break;
      }
    }
    // Copy as much as possible from the buffer without overshooting.
    const int64_t bytes_to_copy =
        std::min<int64_t>(limit_ - pos_, bytes_to_read - result->size());
    result->insert(result->size(), buf_, pos_, bytes_to_copy);
    pos_ += bytes_to_copy;
  }
  // EOF arriving exactly after the requested bytes is not an error.
  if (absl::IsOutOfRange(s) &&
      (result->size() == static_cast<size_t>(bytes_to_read))) {
    return absl::OkStatus();
  }
  return s;
}
// Skips `bytes_to_skip` bytes forward.  Skips stay inside the buffer when
// possible; otherwise the buffer is invalidated and the remainder is
// delegated to the underlying stream.
absl::Status BufferedInputStream::SkipNBytes(int64_t bytes_to_skip) {
  if (bytes_to_skip < 0) {
    return errors::InvalidArgument("Can only skip forward, not ",
                                   bytes_to_skip);
  }
  if (pos_ + bytes_to_skip < limit_) {
    // The skip lands strictly within the buffered data.
    pos_ += bytes_to_skip;
  } else {
    absl::Status s = input_stream_->SkipNBytes(bytes_to_skip - (limit_ - pos_));
    pos_ = 0;
    limit_ = 0;
    if (absl::IsOutOfRange(s)) {
      // Remember EOF so later reads fail without touching the stream.
      file_status_ = s;
    }
    return s;
  }
  return absl::OkStatus();
}
// Logical read position: the underlying stream's position minus the bytes
// that are buffered but not yet consumed.
int64_t BufferedInputStream::Tell() const {
  const int64_t unconsumed = limit_ - pos_;
  return input_stream_->Tell() - unconsumed;
}
// Seeks to absolute `position`.  Positions inside the current buffer are
// handled by moving pos_; seeking backwards past the buffer resets the
// stream and skips forward; seeking forward delegates to SkipNBytes.
absl::Status BufferedInputStream::Seek(int64_t position) {
  if (position < 0) {
    return errors::InvalidArgument("Seeking to a negative position: ",
                                   position);
  }
  // Stream offset of the first byte currently held in the buffer.
  const int64_t buf_lower_limit = input_stream_->Tell() - limit_;
  if (position < buf_lower_limit) {
    // Target precedes the buffer: rewind everything and skip forward.
    TF_RETURN_IF_ERROR(Reset());
    return SkipNBytes(position);
  }
  if (position < Tell()) {
    // Target is within the already-consumed part of the buffer.
    pos_ -= Tell() - position;
    return absl::OkStatus();
  }
  return SkipNBytes(position - Tell());
}
// Reads the remainder of the stream into *result, one buffer at a time.
// OutOfRange (EOF) is cached in file_status_ and reported as success.
template <typename T>
absl::Status BufferedInputStream::ReadAll(T* result) {
  result->clear();
  absl::Status status;
  while (status.ok()) {
    status = FillBuffer();
    if (limit_ == 0) {
      break;
    }
    result->append(buf_);
    pos_ = limit_;
  }
  if (absl::IsOutOfRange(status)) {
    file_status_ = status;
    return absl::OkStatus();
  }
  return status;
}
template Status BufferedInputStream::ReadAll<std::string>(std::string* result);
template Status BufferedInputStream::ReadAll<tstring>(tstring* result);
// Rewinds to the beginning: resets the underlying stream, drops buffered
// data, and clears any cached error status.
absl::Status BufferedInputStream::Reset() {
  TF_RETURN_IF_ERROR(input_stream_->Reset());
  pos_ = 0;
  limit_ = 0;
  file_status_ = absl::OkStatus();
  return absl::OkStatus();
}
// Reads one line (without the trailing newline) into *result.
absl::Status BufferedInputStream::ReadLine(std::string* result) {
  return ReadLineHelper(result, false);
}
// Reads one line (without the trailing newline) into *result.
absl::Status BufferedInputStream::ReadLine(tstring* result) {
  return ReadLineHelper(result, false);
}
// Reads one line including its '\n' terminator, ignoring errors (an empty
// string is returned at EOF or on failure).
std::string BufferedInputStream::ReadLineAsString() {
  std::string result;
  ReadLineHelper(&result, true).IgnoreError();
  return result;
}
// Advances past the next '\n' (or to EOF).  Returns OK if at least one
// character was consumed before EOF; otherwise the stream error.
absl::Status BufferedInputStream::SkipLine() {
  absl::Status s;
  bool skipped = false;
  while (true) {
    if (pos_ == limit_) {
      s = FillBuffer();
      if (limit_ == 0) {
        break;
      }
    }
    char c = buf_[pos_++];
    skipped = true;
    if (c == '\n') {
      return absl::OkStatus();
    }
  }
  // A line terminated by EOF rather than '\n' still counts as skipped.
  if (absl::IsOutOfRange(s) && skipped) {
    return absl::OkStatus();
  }
  return s;
}
}
} | #include "tsl/lib/io/buffered_inputstream.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/random_inputstream.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace io {
namespace {
// Buffer sizes exercised by every test: all sizes 1..20 (to hit boundary
// conditions around line/read lengths) plus one large 64 KiB buffer.
static std::vector<int> BufferSizes() {
  std::vector<int> sizes;
  sizes.reserve(21);
  for (int i = 1; i <= 20; ++i) {
    sizes.push_back(i);
  }
  sizes.push_back(65536);
  return sizes;
}
// An InputStreamInterface that serves "0123456789" exactly once, returning
// OutOfRange together with the data, and rejects any further read.  Used to
// verify that BufferedInputStream caches an OutOfRange status instead of
// re-reading the underlying stream.
class ReadOnceInputStream : public InputStreamInterface {
 public:
  ReadOnceInputStream() : start_(true) {}
  // Marked `override` (was `virtual`) for consistency with Tell/Reset.
  absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) override {
    if (bytes_to_read < 11) {
      return errors::InvalidArgument("Not reading all bytes: ", bytes_to_read);
    }
    if (start_) {
      *result = "0123456789";
      start_ = false;
      return errors::OutOfRange("Out of range.");
    }
    return errors::InvalidArgument(
        "Redundant call to ReadNBytes after an OutOfRange error.");
  }
  int64_t Tell() const override { return start_ ? 0 : 10; }
  // Allows the ten bytes to be served again.
  absl::Status Reset() override {
    start_ = true;
    return absl::OkStatus();
  }

 private:
  bool start_;  // True until the single read has been served.
};
// An empty file yields OutOfRange on the very first ReadLine.
TEST(BufferedInputStream, ReadLine_Empty) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// Reads three newline-terminated lines, then OutOfRange (twice, to check
// the error is sticky).
TEST(BufferedInputStream, ReadLine1) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(
      WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// A final line without a trailing '\n' is still returned successfully.
TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// Consecutive '\n' characters produce empty lines, not skipped lines.
TEST(BufferedInputStream, ReadLine_EmptyLines) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(
      WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// CRLF line endings: '\r' characters are stripped from the returned lines.
TEST(BufferedInputStream, ReadLine_CRLF) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname,
                                 "line one\r\n\r\n\r\nline two\r\nline three"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// SkipLine moves past one line; interleaves with ReadLine; OutOfRange after
// the last line is sticky.
TEST(BufferedInputStream, SkipLine1) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(
      WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.SkipLine());
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.SkipLine());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
  }
}
// SkipLine succeeds on a final line that has no trailing '\n'.
TEST(BufferedInputStream, SkipLine_NoTrailingNewLine) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.SkipLine());
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_ASSERT_OK(in.SkipLine());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
  }
}
// SkipLine consumes exactly one line even when lines are empty.
TEST(BufferedInputStream, SkipLine_EmptyLines) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\n\n\nline two"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    BufferedInputStream in(input_stream.get(), buf_size);
    string line;
    TF_ASSERT_OK(in.SkipLine());
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_ASSERT_OK(in.SkipLine());
    TF_ASSERT_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
  }
}
// ReadNBytes: partial reads, zero-byte reads, Tell tracking, and a short
// final read that returns OutOfRange with the remaining bytes.
TEST(BufferedInputStream, ReadNBytes) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    tstring read;
    BufferedInputStream in(input_stream.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(7, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "789");
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
}
// After the underlying stream reports OutOfRange, the status is cached and
// the stream is never read again (ReadOnceInputStream would fail if it were).
TEST(BufferedInputStream, OutOfRangeCache) {
  for (auto buf_size : BufferSizes()) {
    if (buf_size < 11) {
      continue;
    }
    ReadOnceInputStream input_stream;
    tstring read;
    BufferedInputStream in(&input_stream, buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK((in.ReadNBytes(7, &read)));
    EXPECT_EQ(read, "3456789");
    EXPECT_EQ(10, in.Tell());
    absl::Status s = in.ReadNBytes(5, &read);
    EXPECT_EQ(error::OUT_OF_RANGE, s.code()) << s;
    EXPECT_EQ(read, "");
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
  }
}
// SkipNBytes interleaved with ReadNBytes; skipping past EOF yields
// OutOfRange with Tell clamped to the file size.
TEST(BufferedInputStream, SkipNBytes) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    tstring read;
    BufferedInputStream in(input_stream.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(3));
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(0));
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(2, &read));
    EXPECT_EQ(read, "34");
    EXPECT_EQ(5, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(0));
    EXPECT_EQ(5, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(2));
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(1, &read));
    EXPECT_EQ(read, "7");
    EXPECT_EQ(8, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
}
// Same as ReadNBytes, but via the RandomAccessFile convenience constructor
// (the stream is created and owned internally).
TEST(BufferedInputStream, ReadNBytesRandomAccessFile) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    tstring read;
    BufferedInputStream in(file.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(7, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "789");
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
}
// Same as SkipNBytes, but via the RandomAccessFile convenience constructor.
TEST(BufferedInputStream, SkipNBytesRandomAccessFile) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    tstring read;
    BufferedInputStream in(file.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(3));
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(0));
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(2, &read));
    EXPECT_EQ(read, "34");
    EXPECT_EQ(5, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(0));
    EXPECT_EQ(5, in.Tell());
    TF_ASSERT_OK(in.SkipNBytes(2));
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(1, &read));
    EXPECT_EQ(read, "7");
    EXPECT_EQ(8, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
}
// Seek forward then backward; reads resume from the sought position.
TEST(BufferedInputStream, Seek) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessInputStream> input_stream(
        new RandomAccessInputStream(file.get()));
    tstring read;
    BufferedInputStream in(input_stream.get(), buf_size);
    TF_ASSERT_OK(in.Seek(3));
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.Seek(1));
    TF_ASSERT_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "1234");
    EXPECT_EQ(5, in.Tell());
  }
}
// Seeking backward within the buffered window must not reset or re-read the
// underlying stream (its Tell stays unchanged).
TEST(BufferedInputStream, Seek_NotReset) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  std::unique_ptr<RandomAccessInputStream> input_stream(
      new RandomAccessInputStream(file.get()));
  tstring read;
  BufferedInputStream in(input_stream.get(), 3);
  TF_ASSERT_OK(in.ReadNBytes(4, &read));
  int before_tell = input_stream.get()->Tell();
  EXPECT_EQ(before_tell, 6);
  TF_ASSERT_OK(in.Seek(3));
  int after_tell = input_stream.get()->Tell();
  EXPECT_EQ(before_tell, after_tell);
}
// ReadAll on an empty file succeeds with an empty result.
TEST(BufferedInputStream, ReadAll_Empty) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  const string expected = "";
  TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    RandomAccessInputStream input_stream(file.get());
    BufferedInputStream in(&input_stream, buf_size);
    string contents;
    TF_ASSERT_OK(in.ReadAll(&contents));
    EXPECT_EQ(expected, contents);
  }
}
// ReadAll returns the entire file contents for every buffer size.
TEST(BufferedInputStream, ReadAll_Text) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  const string expected = "line one\nline two\nline three";
  TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  for (auto buf_size : BufferSizes()) {
    RandomAccessInputStream input_stream(file.get());
    BufferedInputStream in(&input_stream, buf_size);
    string contents;
    TF_ASSERT_OK(in.ReadAll(&contents));
    EXPECT_EQ(expected, contents);
  }
}
// Benchmark: reads a file of `file_size` ten-byte chunks one byte at a time
// through a buffer of `buff_size` bytes, measuring per-byte read overhead.
void BM_BufferedReaderSmallReads(::testing::benchmark::State& state) {
  const int buff_size = state.range(0);
  const int file_size = state.range(1);
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  const string file_elem = "0123456789";
  std::unique_ptr<WritableFile> write_file;
  TF_ASSERT_OK(env->NewWritableFile(fname, &write_file));
  for (int i = 0; i < file_size; ++i) {
    TF_ASSERT_OK(write_file->Append(file_elem));
  }
  TF_ASSERT_OK(write_file->Close());
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  tstring result;
  int itr = 0;
  for (auto s : state) {
    BufferedInputStream in(file.get(), buff_size);
    for (int64_t i = 0; i < 10 * file_size; ++i) {
      TF_ASSERT_OK(in.ReadNBytes(1, &result))
          << "i: " << i << " itr: " << itr << " buff_size: " << buff_size
          << " file size: " << file_size;
    }
    ++itr;
  }
}
// Arg pairs are (buffer size, file size in 10-byte chunks), covering buffers
// both smaller and far larger than the file.
BENCHMARK(BM_BufferedReaderSmallReads)
    ->ArgPair(1, 5)
    ->ArgPair(1, 1024)
    ->ArgPair(10, 5)
    ->ArgPair(10, 1024)
    ->ArgPair(1024, 1024)
    ->ArgPair(1024 * 1024, 1024)
    ->ArgPair(1024 * 1024, 1024 * 1024)
    ->ArgPair(256 * 1024 * 1024, 1024);
}
}
} | int64_t BufferedInputStream::Tell() const {
return input_stream_->Tell() - (limit_ - pos_);
} | TEST(BufferedInputStream, ReadNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
} |
#define EIGEN_USE_THREADS
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/meta_support.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
// Adds a 1-D quantized bias to the last dimension of a quantized input.
// T1/T2 are the quantized input/bias element types; T3 is the quantized
// output type.  Inputs 2-5 carry the float min/max ranges of the input and
// bias; outputs 1 and 2 carry the computed range of the output.
template <class T1, class T2, class T3>
class QuantizedBiasAddOp : public OpKernel {
 public:
  explicit QuantizedBiasAddOp(OpKernelConstruction* context)
      : OpKernel(context) {}
  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    const Tensor& bias = context->input(1);
    const Tensor& min_input = context->input(2);
    const Tensor& max_input = context->input(3);
    const Tensor& min_bias = context->input(4);
    const Tensor& max_bias = context->input(5);
    // All four range inputs must be scalars.
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(min_input.shape()),
        errors::InvalidArgument("`min_input` must be rank 0 but is rank ",
                                min_input.dims()));
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(max_input.shape()),
        errors::InvalidArgument("`max_input` must be rank 0 but is rank ",
                                max_input.dims()));
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_bias.shape()),
                errors::InvalidArgument(
                    "`min_bias` must be rank 0 but is rank ", min_bias.dims()));
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_bias.shape()),
                errors::InvalidArgument(
                    "`max_bias` must be rank 0 but is rank ", max_bias.dims()));
    const float input_min = min_input.flat<float>()(0);
    const float input_max = max_input.flat<float>()(0);
    const float bias_min = min_bias.flat<float>()(0);
    const float bias_max = max_bias.flat<float>()(0);
    OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        input.shape().DebugString()));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
                errors::InvalidArgument("Biases must be 1D: ",
                                        bias.shape().DebugString()));
    // The bias is broadcast along the innermost (last) dimension.
    const auto last_dim = input.shape().dims() - 1;
    OP_REQUIRES(
        context, bias.shape().dim_size(0) == input.shape().dim_size(last_dim),
        errors::InvalidArgument(
            "Must provide as many biases as the last dimension "
            "of the input tensor: ",
            bias.shape().DebugString(), " vs. ", input.shape().DebugString()));
    OP_REQUIRES(context, bias.NumElements() > 0,
                errors::InvalidArgument("Must provide at least 1 bias"));
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, input.shape(), &output));
    float total_min;
    float total_max;
    // Prefer the accelerated "meta" implementation when it is available and
    // the type combination is quint8 + quint8 -> qint32.
    if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&
        std::is_same<T2, quint8>() && std::is_same<T3, qint32>()) {
      auto input_ui8_array = input.flat<quint8>();
      auto bias_ui8_array = bias.flat<quint8>();
      GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min,
                                        bias_max, &total_min, &total_max);
      meta::QuantizedBiasAdd(context, input_ui8_array.data(),
                             input_ui8_array.size(), bias_ui8_array.data(),
                             bias_ui8_array.size(), input_min, input_max,
                             bias_min, bias_max, total_min, total_max,
                             output->flat<qint32>().data());
    } else {
      // Generic Eigen fallback; also computes the output range.
      QuantizedAddUsingEigen<T1, T2, T3>(
          context->template eigen_device<CPUDevice>(), input, input_min,
          input_max, bias, bias_min, bias_max, output, &total_min, &total_max);
    }
    // Emit the output range as scalar outputs 1 and 2.
    Tensor* output_min = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
    output_min->flat<float>()(0) = total_min;
    Tensor* output_max = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
    output_max->flat<float>()(0) = total_max;
  }
};
// CPU kernel registrations for the supported quantized type combinations.
REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<quint8>("T1")
                            .TypeConstraint<quint8>("T2")
                            .TypeConstraint<qint32>("out_type"),
                        QuantizedBiasAddOp<quint8, quint8, qint32>);
REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<qint8>("T1")
                            .TypeConstraint<qint8>("T2")
                            .TypeConstraint<qint32>("out_type"),
                        QuantizedBiasAddOp<qint8, qint8, qint32>);
} | #define EIGEN_USE_THREADS
#include <functional>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Fixture providing OpsTestBase helpers (node_def, InitOp, RunOpKernel) for
// the QuantizedBiasAdd kernel tests below.
class QuantizedBiasAddTest : public OpsTestBase {
 protected:
};
// Small 2x3 input with a 3-element bias; verifies the dequantized output is
// close to the float reference within quantization tolerance.
TEST_F(QuantizedBiasAddTest, Small) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const float input_min = 0.0f;
  const float input_max = 60.0f;
  const int input_height = 2;
  const int input_width = 3;
  Tensor input_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(&input_float,
                          {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float bias_min = 0.0f;
  const float bias_max = 3.0f;
  const int bias_width = 3;
  Tensor bias_float(DT_FLOAT, {bias_width});
  test::FillValues<float>(&bias_float, {1.0f, 2.0f, 3.0f});
  Tensor bias_quantized =
      FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  Tensor expected_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(&expected_float,
                          {11.0f, 22.0f, 33.0f, 41.0f, 52.0f, 63.0f});
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<quint8>(bias_quantized.shape(),
                            bias_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {input_min});
  AddInputFromArray<float>(TensorShape({}), {input_max});
  AddInputFromArray<float>(TensorShape({}), {bias_min});
  AddInputFromArray<float>(TensorShape({}), {bias_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
// Realistic 1x64 activations with a wide value range and a small-magnitude
// bias; the looser 20.0 tolerance reflects the coarse quantization step of
// the large input range.
TEST_F(QuantizedBiasAddTest, RealData) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const float input_min = -2164.25f;
  const float input_max = 2006.27f;
  const int input_height = 1;
  const int input_width = 64;
  Tensor input_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &input_float,
      {-1014.12, -157.382, -810.17,  1435.28,  1016.37,  219.684,  -316.054,
       -2164.25, 2006.27,  -547.444, 857.376,  404.376,  9.72115,  332.588,
       194.385,  -286.57,  26.062,   23.1125,  110.436,  247.055,  -127.683,
       -376.275, -124.81,  -846.826, -77.1507, 305.581,  -202.747, 12.9528,
       9.64886,  872.686,  40.9069,  197.816,  44.16,    -306.768, -1457.52,
       -368.939, -1049.42, -486.353, 1745.87,  95.7695,  395.773,  -254.333,
       -404.27,  787.16,   -2.44114, 199.37,   -1024.08, 784.901,  235.055,
       -42.7295, 241.498,  -245.365, 470.763,  186.159,  186.579,  -220.163,
       1304.58,  386.272,  -358.853, -755.996, 360.109,  -866.007, 55.2828,
       -508.801});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float bias_min = -0.739539f;
  const float bias_max = 0.641057f;
  const int bias_width = 64;
  Tensor bias_float(DT_FLOAT, {bias_width});
  test::FillValues<float>(
      &bias_float,
      {-0.294619, -0.0670519, 0.261507,   -0.126274, 0.127229,   -0.176945,
       -0.251223, 0.231086,   0.453694,   0.415666,  -0.288733,  0.508717,
       0.211551,  0.0435907,  -0.582383,  -0.308779, 0.0696883,  -0.438122,
       0.114,     0.433964,   0.109883,   0.284931,  -0.149661,  0.108657,
       0.458333,  -0.130231,  -0.35805,   -0.123206, -0.437968,  0.0282411,
       0.628818,  -0.0522173, -0.0233403, 0.124863,  0.217165,   0.262294,
       -0.171005, -0.254693,  -0.200433,  -0.287354, 0.488166,   -0.0354688,
       -0.118091, -0.590444,  0.491537,   -0.739539, 0.083117,   0.282482,
       0.275269,  -0.36574,   0.107476,   0.0511428, -0.136887,  -0.0149852,
       -0.259694, 0.641057,   0.264054,   -0.295126, -0.0218791, 0.361211,
       0.012448,  0.0709718,  -0.392394,  -0.434215});
  Tensor bias_quantized =
      FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  Tensor expected_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &expected_float,
      {-1014.42, -157.449, -809.908, 1435.16,  1016.5,   219.507,  -316.305,
       -2164.02, 2006.73,  -547.028, 857.088,  404.885,  9.9327,   332.632,
       193.803,  -286.878, 26.1317,  22.6744,  110.55,   247.489,  -127.573,
       -375.99,  -124.959, -846.717, -76.6923, 305.451,  -203.105, 12.8296,
       9.21089,  872.714,  41.5357,  197.764,  44.1367,  -306.643, -1457.3,
       -368.677, -1049.6,  -486.608, 1745.67,  95.4821,  396.261,  -254.368,
       -404.388, 786.57,   -1.94961, 198.63,   -1024.0,  785.183,  235.33,
       -43.0953, 241.605,  -245.314, 470.627,  186.144,  186.319,  -219.522,
       1304.84,  385.977,  -358.874, -755.635, 360.122,  -865.936, 54.8904,
       -509.235});
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<quint8>(bias_quantized.shape(),
                            bias_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {input_min});
  AddInputFromArray<float>(TensorShape({}), {input_max});
  AddInputFromArray<float>(TensorShape({}), {bias_min});
  AddInputFromArray<float>(TensorShape({}), {bias_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 20.0);
}
} | void Compute(OpKernelContext* context) override {
  // Adds a 1-D quantized bias (input 1) to the last dimension of a quantized
  // tensor (input 0). Inputs 2-5 are scalar float ranges (min/max) for the
  // input and the bias; outputs are the quantized sum (output 0) plus the
  // scalar float min/max range of the result (outputs 1 and 2).
  const Tensor& input = context->input(0);
  const Tensor& bias = context->input(1);
  const Tensor& min_input = context->input(2);
  const Tensor& max_input = context->input(3);
  const Tensor& min_bias = context->input(4);
  const Tensor& max_bias = context->input(5);
  // All four range inputs must be scalars.
  OP_REQUIRES(
      context, TensorShapeUtils::IsScalar(min_input.shape()),
      errors::InvalidArgument("`min_input` must be rank 0 but is rank ",
                              min_input.dims()));
  OP_REQUIRES(
      context, TensorShapeUtils::IsScalar(max_input.shape()),
      errors::InvalidArgument("`max_input` must be rank 0 but is rank ",
                              max_input.dims()));
  OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_bias.shape()),
              errors::InvalidArgument(
                  "`min_bias` must be rank 0 but is rank ", min_bias.dims()));
  OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_bias.shape()),
              errors::InvalidArgument(
                  "`max_bias` must be rank 0 but is rank ", max_bias.dims()));
  const float input_min = min_input.flat<float>()(0);
  const float input_max = max_input.flat<float>()(0);
  const float bias_min = min_bias.flat<float>()(0);
  const float bias_max = max_bias.flat<float>()(0);
  // The input must be at least a matrix and the bias a non-empty vector whose
  // length matches the input's last dimension.
  OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
              errors::InvalidArgument("Input tensor must be at least 2D: ",
                                      input.shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
              errors::InvalidArgument("Biases must be 1D: ",
                                      bias.shape().DebugString()));
  const auto last_dim = input.shape().dims() - 1;
  OP_REQUIRES(
      context, bias.shape().dim_size(0) == input.shape().dim_size(last_dim),
      errors::InvalidArgument(
          "Must provide as many biases as the last dimension "
          "of the input tensor: ",
          bias.shape().DebugString(), " vs. ", input.shape().DebugString()));
  OP_REQUIRES(context, bias.NumElements() > 0,
              errors::InvalidArgument("Must provide at least 1 bias"));
  Tensor* output = nullptr;
  OP_REQUIRES_OK(context,
                 context->allocate_output(0, input.shape(), &output));
  float total_min;
  float total_max;
  // Use the optimized "meta" kernel path when it is available and the type
  // combination is quint8 + quint8 -> qint32; otherwise fall back to the
  // generic Eigen implementation. Both fill in the output's float range.
  if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&
      std::is_same<T2, quint8>() && std::is_same<T3, qint32>()) {
    auto input_ui8_array = input.flat<quint8>();
    auto bias_ui8_array = bias.flat<quint8>();
    GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min,
                                      bias_max, &total_min, &total_max);
    meta::QuantizedBiasAdd(context, input_ui8_array.data(),
                           input_ui8_array.size(), bias_ui8_array.data(),
                           bias_ui8_array.size(), input_min, input_max,
                           bias_min, bias_max, total_min, total_max,
                           output->flat<qint32>().data());
  } else {
    QuantizedAddUsingEigen<T1, T2, T3>(
        context->template eigen_device<CPUDevice>(), input, input_min,
        input_max, bias, bias_min, bias_max, output, &total_min, &total_max);
  }
  // Publish the float range of the quantized output as scalar outputs 1/2.
  Tensor* output_min = nullptr;
  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
  output_min->flat<float>()(0) = total_min;
  Tensor* output_max = nullptr;
  OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
  output_max->flat<float>()(0) = total_max;
} | TEST_F(QuantizedBiasAddTest, Small) {
  // Quantizes a tiny 2x3 input and a length-3 bias, runs QuantizedBiasAdd,
  // and checks the dequantized result against a hand-computed float sum.
  TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const float input_min = 0.0f;
  const float input_max = 60.0f;
  const int input_height = 2;
  const int input_width = 3;
  Tensor input_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(&input_float,
                          {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float bias_min = 0.0f;
  const float bias_max = 3.0f;
  const int bias_width = 3;
  Tensor bias_float(DT_FLOAT, {bias_width});
  test::FillValues<float>(&bias_float, {1.0f, 2.0f, 3.0f});
  Tensor bias_quantized =
      FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  Tensor expected_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(&expected_float,
                          {11.0f, 22.0f, 33.0f, 41.0f, 52.0f, 63.0f});
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<quint8>(bias_quantized.shape(),
                            bias_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {input_min});
  AddInputFromArray<float>(TensorShape({}), {input_max});
  AddInputFromArray<float>(TensorShape({}), {bias_min});
  AddInputFromArray<float>(TensorShape({}), {bias_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  // Tolerance absorbs the quantization error of the quint8 inputs.
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
TEST_F(QuantizedBiasAddTest, RealData) {
  // Regression test using 64-element input/bias/expected values captured from
  // a real model; the wide input range makes quantization error much larger,
  // hence the loose 20.0 tolerance at the end.
  TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("out_type", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const float input_min = -2164.25f;
  const float input_max = 2006.27f;
  const int input_height = 1;
  const int input_width = 64;
  Tensor input_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &input_float,
      {-1014.12, -157.382, -810.17, 1435.28, 1016.37, 219.684, -316.054,
       -2164.25, 2006.27, -547.444, 857.376, 404.376, 9.72115, 332.588,
       194.385, -286.57, 26.062, 23.1125, 110.436, 247.055, -127.683,
       -376.275, -124.81, -846.826, -77.1507, 305.581, -202.747, 12.9528,
       9.64886, 872.686, 40.9069, 197.816, 44.16, -306.768, -1457.52,
       -368.939, -1049.42, -486.353, 1745.87, 95.7695, 395.773, -254.333,
       -404.27, 787.16, -2.44114, 199.37, -1024.08, 784.901, 235.055,
       -42.7295, 241.498, -245.365, 470.763, 186.159, 186.579, -220.163,
       1304.58, 386.272, -358.853, -755.996, 360.109, -866.007, 55.2828,
       -508.801});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float bias_min = -0.739539f;
  const float bias_max = 0.641057f;
  const int bias_width = 64;
  Tensor bias_float(DT_FLOAT, {bias_width});
  test::FillValues<float>(
      &bias_float,
      {-0.294619, -0.0670519, 0.261507, -0.126274, 0.127229, -0.176945,
       -0.251223, 0.231086, 0.453694, 0.415666, -0.288733, 0.508717,
       0.211551, 0.0435907, -0.582383, -0.308779, 0.0696883, -0.438122,
       0.114, 0.433964, 0.109883, 0.284931, -0.149661, 0.108657,
       0.458333, -0.130231, -0.35805, -0.123206, -0.437968, 0.0282411,
       0.628818, -0.0522173, -0.0233403, 0.124863, 0.217165, 0.262294,
       -0.171005, -0.254693, -0.200433, -0.287354, 0.488166, -0.0354688,
       -0.118091, -0.590444, 0.491537, -0.739539, 0.083117, 0.282482,
       0.275269, -0.36574, 0.107476, 0.0511428, -0.136887, -0.0149852,
       -0.259694, 0.641057, 0.264054, -0.295126, -0.0218791, 0.361211,
       0.012448, 0.0709718, -0.392394, -0.434215});
  Tensor bias_quantized =
      FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  Tensor expected_float(DT_FLOAT, {input_height, input_width});
  test::FillValues<float>(
      &expected_float,
      {-1014.42, -157.449, -809.908, 1435.16, 1016.5, 219.507, -316.305,
       -2164.02, 2006.73, -547.028, 857.088, 404.885, 9.9327, 332.632,
       193.803, -286.878, 26.1317, 22.6744, 110.55, 247.489, -127.573,
       -375.99, -124.959, -846.717, -76.6923, 305.451, -203.105, 12.8296,
       9.21089, 872.714, 41.5357, 197.764, 44.1367, -306.643, -1457.3,
       -368.677, -1049.6, -486.608, 1745.67, 95.4821, 396.261, -254.368,
       -404.388, 786.57, -1.94961, 198.63, -1024.0, 785.183, 235.33,
       -43.0953, 241.605, -245.314, 470.627, 186.144, 186.319, -219.522,
       1304.84, 385.977, -358.874, -755.635, 360.122, -865.936, 54.8904,
       -509.235});
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<quint8>(bias_quantized.shape(),
                            bias_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {input_min});
  AddInputFromArray<float>(TensorShape({}), {input_max});
  AddInputFromArray<float>(TensorShape({}), {bias_min});
  AddInputFromArray<float>(TensorShape({}), {bias_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 20.0);
} |
#include "tensorstore/internal/json_gtest.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
// gMock matcher implementation that tests a JSON value for structural
// equality (via internal_json::JsonSame) against a fixed expected value.
class JsonMatcherImpl : public ::testing::MatcherInterface<::nlohmann::json> {
 public:
  JsonMatcherImpl(::nlohmann::json value) : value_(std::move(value)) {}
  // Returns true iff `value_untyped` equals the expected value. On mismatch,
  // writes a JSON diff to `listener` when the listener is interested.
  bool MatchAndExplain(
      ::nlohmann::json value_untyped,
      ::testing::MatchResultListener* listener) const override {
    if (!internal_json::JsonSame(value_, value_untyped)) {
      if (listener->IsInterested()) {
        *listener << "where the difference is:\n"
                  << ::nlohmann::json::diff(value_, value_untyped).dump(2);
      }
      return false;
    }
    return true;
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "matches json " << value_;
  }
  void DescribeNegationTo(std::ostream* os) const override {
    *os << "does not match json " << value_;
  }

 private:
  // Expected JSON value to compare against.
  ::nlohmann::json value_;
};
}
// Returns a gMock matcher that tests structural equality with `j`.
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j) {
  auto* impl = new JsonMatcherImpl(std::move(j));
  // MakeMatcher takes ownership of the implementation object.
  return ::testing::MakeMatcher(impl);
}
namespace {
// gMock matcher that resolves a JSON pointer within the matched value and
// applies `sub_value_matcher_` to the resolved sub value.
class JsonPointerMatcherImpl
    : public ::testing::MatcherInterface<::nlohmann::json> {
 public:
  JsonPointerMatcherImpl(std::string sub_value_pointer,
                         ::testing::Matcher<::nlohmann::json> sub_value_matcher)
      : sub_value_pointer_(std::move(sub_value_pointer)),
        sub_value_matcher_(std::move(sub_value_matcher)) {}
  bool MatchAndExplain(
      ::nlohmann::json value_untyped,
      ::testing::MatchResultListener* listener) const override {
    // A pointer that fails to resolve is reported as a mismatch (with the
    // resolution error), not as a test error.
    auto sub_value =
        json_pointer::Dereference(value_untyped, sub_value_pointer_);
    if (!sub_value.ok()) {
      if (listener->IsInterested()) {
        *listener << "where the pointer could not be resolved: "
                  << sub_value.status();
      }
      return false;
    }
    if (listener->IsInterested()) {
      // Capture the sub-matcher's own explanation so it can be appended.
      ::testing::StringMatchResultListener s;
      if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
        *listener << "whose sub value doesn't match";
        auto str = s.str();
        if (!str.empty()) {
          *listener << ", " << str;
        }
        return false;
      }
      return true;
    }
    // Fast path when no explanation is requested.
    return sub_value_matcher_.Matches(**sub_value);
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "has sub value " << tensorstore::QuoteString(sub_value_pointer_)
        << " that ";
    sub_value_matcher_.DescribeTo(os);
  }
  void DescribeNegationTo(std::ostream* os) const override {
    *os << "does not have sub value "
        << tensorstore::QuoteString(sub_value_pointer_) << " that ";
    sub_value_matcher_.DescribeTo(os);
  }

 private:
  // JSON pointer identifying the sub value to match.
  std::string sub_value_pointer_;
  // Matcher applied to the resolved sub value.
  ::testing::Matcher<nlohmann::json> sub_value_matcher_;
};
}
// Returns a matcher that applies `value_matcher` to the sub value found at
// `json_pointer` within the matched JSON value.
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer,
    ::testing::Matcher<::nlohmann::json> value_matcher) {
  auto* impl = new JsonPointerMatcherImpl(std::move(json_pointer),
                                          std::move(value_matcher));
  // MakeMatcher takes ownership of the implementation object.
  return ::testing::MakeMatcher(impl);
}
// Convenience overload: matches the sub value at `json_pointer` for
// structural equality with a concrete JSON value.
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer, ::nlohmann::json value_matcher) {
  auto equality_matcher = MatchesJson(std::move(value_matcher));
  return JsonSubValueMatches(std::move(json_pointer),
                             std::move(equality_matcher));
}
// Returns a matcher that succeeds only if every (pointer, expected value)
// pair in `matchers` matches the corresponding sub value.
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
    std::vector<std::pair<std::string, ::nlohmann::json>> matchers) {
  std::vector<::testing::Matcher<::nlohmann::json>> combined;
  combined.reserve(matchers.size());
  for (const auto& [pointer, expected] : matchers) {
    combined.push_back(JsonSubValueMatches(pointer, expected));
  }
  return ::testing::AllOfArray(combined);
}
} | #include "tensorstore/internal/json_gtest.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::JsonSubValueMatches;
using ::tensorstore::JsonSubValuesMatch;
using ::tensorstore::MatchesJson;
// Renders a matcher's DescribeTo output as a std::string.
template <typename MatcherType>
std::string Describe(const MatcherType& m) {
  std::ostringstream out;
  m.DescribeTo(&out);
  return out.str();
}
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(JsonSubValueMatchesTest, Example) {
  // JSON pointers like "/b/c" resolve nested members of the matched value.
  ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
  EXPECT_THAT(obj, JsonSubValueMatches("/a", 123));
  EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
  EXPECT_THAT(obj,
              JsonSubValueMatches("/b/c", ::testing::Not(MatchesJson("xy"))));
  // Description and explanation strings are part of the matcher contract.
  EXPECT_THAT(Describe(JsonSubValueMatches("/a", 123)),
              "has sub value \"/a\" that matches json 123");
  EXPECT_THAT(Explain(JsonSubValueMatches("/a", 124), obj),
              ::testing::StartsWith(
                  "whose sub value doesn't match, where the difference is:"));
}
TEST(JsonSubValuesMatchTest, Example) {
  // Several pointer/value pairs can be checked with one combined matcher.
  ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
  EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
}
} | bool MatchAndExplain(
    ::nlohmann::json value_untyped,
    ::testing::MatchResultListener* listener) const override {
  // Structural comparison against the stored expected value; a JSON diff is
  // emitted on mismatch when the listener wants an explanation.
  if (!internal_json::JsonSame(value_, value_untyped)) {
    if (listener->IsInterested()) {
      *listener << "where the difference is:\n"
                << ::nlohmann::json::diff(value_, value_untyped).dump(2);
    }
    return false;
  }
  return true;
}
bool MatchAndExplain(
    ::nlohmann::json value_untyped,
    ::testing::MatchResultListener* listener) const override {
  // Resolve the stored JSON pointer first; failure to resolve counts as a
  // mismatch and is reported with the resolution error.
  auto sub_value =
      json_pointer::Dereference(value_untyped, sub_value_pointer_);
  if (!sub_value.ok()) {
    if (listener->IsInterested()) {
      *listener << "where the pointer could not be resolved: "
                << sub_value.status();
    }
    return false;
  }
  if (listener->IsInterested()) {
    // Capture the sub-matcher's explanation so it can be appended to ours.
    ::testing::StringMatchResultListener s;
    if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
      *listener << "whose sub value doesn't match";
      auto str = s.str();
      if (!str.empty()) {
        *listener << ", " << str;
      }
      return false;
    }
    return true;
  }
  // Fast path when no explanation is requested.
  return sub_value_matcher_.Matches(**sub_value);
} | TEST(JsonSubValuesMatchTest, Example) {
  // Several pointer/value pairs can be checked with one combined matcher.
  ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
  EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
} |
#include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
// Returns the sum of all shared-object sizes in `assignment`.
size_t TotalSize(const ObjectsAssignment<size_t>& assignment) {
  size_t total = 0;
  for (const size_t object_size : assignment.object_sizes) {
    total += object_size;
  }
  return total;
}
}
// Converts an object-based assignment into an offset-based one by laying the
// shared objects out back-to-back: object i starts where object i-1 ends.
OffsetsAssignment ObjectsToOffsets(
    const ObjectsAssignment<size_t>& obj_assignment) {
  const size_t num_tensors = obj_assignment.object_ids.size();
  const size_t num_objects = obj_assignment.object_sizes.size();
  OffsetsAssignment result = {std::vector<size_t>(num_tensors),
                              0};
  // First pass: compute the starting offset of every object and the total
  // arena size.
  std::vector<size_t> object_offset(num_objects);
  for (size_t obj = 0; obj < num_objects; ++obj) {
    object_offset[obj] = result.total_size;
    result.total_size += obj_assignment.object_sizes[obj];
  }
  // Second pass: each tensor inherits the offset of its assigned object.
  for (size_t tensor = 0; tensor < num_tensors; ++tensor) {
    result.offsets[tensor] = object_offset[obj_assignment.object_ids[tensor]];
  }
  return result;
}
// Runs both greedy strategies (by-size-distance-priority, then by-breadth)
// and keeps whichever assignment uses less total memory.
absl::Status BestGreedy(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    ObjectsAssignment<size_t>* assignment) {
  RETURN_IF_ERROR(
      GreedyBySizeDistPriorityAssignment(usage_records, assignment));
  ObjectsAssignment<size_t> assignment_by_breadth;
  // A failure of the by-breadth strategy is not fatal: the by-size result
  // above is already a valid answer.
  if (GreedyByBreadthAssignment(usage_records, &assignment_by_breadth).ok() &&
      TotalSize(assignment_by_breadth) < TotalSize(*assignment)) {
    std::swap(*assignment, assignment_by_breadth);
  }
  return absl::OkStatus();
}
// Specialization for scalar (size_t) tensor sizes; every memory strategy is
// supported for this size type.
template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
    const UsageGraph* reallocation_graph) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignmentWithHash(usage_records, assignment);
    case MemoryStrategy::GREEDY_IN_ORDER:
      // Only this strategy makes use of the optional reallocation graph.
      return GreedyInOrderAssignment(usage_records, assignment,
                                     reallocation_graph);
    case MemoryStrategy::GREEDY_BY_BREADTH:
      return GreedyByBreadthAssignment(usage_records, assignment);
    case MemoryStrategy::GREEDY_BY_SIZE:
      return GreedyBySizeDistPriorityAssignment(usage_records, assignment);
    case MemoryStrategy::GREEDY_BEST:
      return BestGreedy(usage_records, assignment);
    case MemoryStrategy::MINCOSTFLOW:
      return MinCostFlowAssignment(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
  return absl::OkStatus();
}
// Specialization for BHWC-shaped tensor sizes; only the naive and equality
// strategies are supported for this size type.
template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<BHWC>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
    const UsageGraph* reallocation_graph) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignmentWithHash(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
  return absl::OkStatus();
}
// Specialization for 2-D (uint2) tensor sizes; supports naive, equality, and
// the multidimensional greedy-in-order strategies.
template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<uint2>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
    const UsageGraph* reallocation_graph) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignment(usage_records, assignment);
    case MemoryStrategy::GREEDY_IN_ORDER:
      return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
  return absl::OkStatus();
}
// Specialization for 3-D (uint3) tensor sizes; supports the same strategies
// as the uint2 specialization above.
template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<uint3>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
    const UsageGraph* reallocation_graph) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignment(usage_records, assignment);
    case MemoryStrategy::GREEDY_IN_ORDER:
      return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
  return absl::OkStatus();
}
// Assigns each tensor a byte offset within one shared memory arena.
// GREEDY_BY_SIZE computes offsets directly (honoring the base-address
// alignment); all other strategies first assign tensors to shared objects
// and then lay those objects out contiguously.
absl::Status AssignOffsetsToTensors(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    const MemoryStrategy& strategy, OffsetsAssignment* assignment,
    size_t base_addr_align_bytes, const UsageGraph* reallocation_graph) {
  if (strategy == MemoryStrategy::GREEDY_BY_SIZE) {
    return GreedyBySizeAssignment(usage_records, base_addr_align_bytes,
                                  assignment);
  }
  ObjectsAssignment<size_t> objects_assignment;
  RETURN_IF_ERROR(AssignObjectsToTensors(
      usage_records, strategy, &objects_assignment, reallocation_graph));
  *assignment = ObjectsToOffsets(objects_assignment);
  return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(Model, EmptyAssignment) {
  // An assignment with no objects converts to empty offsets of total size 0.
  ObjectsAssignment<size_t> objects_assignment;
  OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
  EXPECT_TRUE(result.offsets.empty());
  EXPECT_EQ(result.total_size, 0);
}
TEST(Model, OneObjectAssignment) {
  // All tensors sharing a single object get offset 0 and total size equal to
  // that object's size.
  ObjectsAssignment<size_t> objects_assignment;
  objects_assignment.object_sizes = {16};
  objects_assignment.object_ids = {0};
  OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
  EXPECT_EQ(result.total_size, 16);
  EXPECT_THAT(result.offsets, ElementsAre(0));
  objects_assignment.object_ids = {0, 0, 0};
  result = ObjectsToOffsets(objects_assignment);
  EXPECT_EQ(result.total_size, 16);
  EXPECT_THAT(result.offsets, ElementsAre(0, 0, 0));
}
TEST(Model, ManyObjectsAssignment) {
  // Tensor offsets equal the cumulative sizes of the objects preceding each
  // tensor's assigned object.
  ObjectsAssignment<size_t> objects_assignment;
  objects_assignment.object_sizes = {16, 8, 32, 32, 4, 16};
  objects_assignment.object_ids = {2, 0, 2, 1, 3, 3, 1, 5};
  OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
  EXPECT_THAT(result.offsets, ElementsAre(24, 0, 24, 16, 56, 56, 16, 92));
}
TEST(Model, EmptyRecords) {
  // Every strategy must accept an empty set of usage records and produce an
  // empty assignment.
  ObjectsAssignment<size_t> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::NAIVE, &assignment).ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::EQUALITY, &assignment).ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
          .ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::MINCOSTFLOW, &assignment)
          .ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
          .ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  ASSERT_TRUE(
      AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
          .ok());
  EXPECT_TRUE(assignment.object_ids.empty());
  EXPECT_TRUE(assignment.object_sizes.empty());
  OffsetsAssignment offsets_assignment;
  ASSERT_TRUE(AssignOffsetsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE,
                                     &offsets_assignment)
                  .ok());
  EXPECT_TRUE(offsets_assignment.offsets.empty());
  EXPECT_EQ(offsets_assignment.total_size, 0);
}
TEST(Model, OneRecord) {
  // With a single tensor, every strategy assigns one object of that size.
  std::vector<TensorUsageRecord<size_t>> usage_records{
      {16, 0, 1}};
  ObjectsAssignment<size_t> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
  OffsetsAssignment offsets_assignment;
  ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
                                     MemoryStrategy::GREEDY_BY_SIZE,
                                     &offsets_assignment)
                  .ok());
  EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0));
  EXPECT_EQ(offsets_assignment.total_size, 16);
}
TEST(Model, ChainRecords) {
  // A linear chain of tensors (each alive for two consecutive tasks): the
  // smarter strategies alternate between just two shared objects.
  std::vector<TensorUsageRecord<size_t>> usage_records{
      {16, 0, 1},
      {8, 1, 2},
      {64, 2, 3},
      {32, 3, 4},
      {8, 4, 5},
  };
  ObjectsAssignment<size_t> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32, 8));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 1));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
  OffsetsAssignment offsets_assignment;
  ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
                                     MemoryStrategy::GREEDY_BY_SIZE,
                                     &offsets_assignment)
                  .ok());
  EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 64, 0, 64, 0));
  EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, ComplexRecords) {
  // Overlapping lifetimes; each strategy yields a different (documented)
  // packing, with the greedy-by-size offsets fitting in a 96-byte arena.
  std::vector<TensorUsageRecord<size_t>> usage_records{
      {32, 0, 1},
      {32, 1, 4},
      {8, 2, 5},
      {16, 3, 5},
      {8, 4, 5},
      {64, 5, 7},
      {8, 6, 8},
      {8, 7, 8},
      {16, 8, 9}};
  ObjectsAssignment<size_t> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(32, 32, 8, 16, 8, 64, 8, 8, 16));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 4, 2, 3));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 32, 8, 16, 8, 64));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 3, 2, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 8, 8));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 3, 1, 3, 2, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 16, 8));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 4, 2, 1, 3, 0, 2, 3, 1));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 16, 8, 8, 32));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(1, 0, 2, 1, 3, 0, 1, 2, 0));
  EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32, 8, 8));
  OffsetsAssignment offsets_assignment;
  ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
                                     MemoryStrategy::GREEDY_BY_SIZE,
                                     &offsets_assignment)
                  .ok());
  EXPECT_THAT(offsets_assignment.offsets,
              ElementsAre(0, 32, 80, 64, 88, 0, 64, 72, 0));
  EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, BHWCRecords) {
  // BHWC-sized tensors: equality matching reuses objects only for tensors
  // with identical shapes (permuted dims like 1x16x1x1 vs 16x1x1x1 differ).
  std::vector<TensorUsageRecord<BHWC>> usage_records{
      {BHWC(1, 1, 2, 8), 0, 1},
      {BHWC(1, 1, 2, 8), 1, 2},
      {BHWC(1, 1, 1, 16), 2, 4},
      {BHWC(1, 1, 2, 8), 3, 5},
      {BHWC(1, 1, 8, 2), 4, 5},
      {BHWC(1, 1, 2, 8), 5, 7},
      {BHWC(1, 16, 1, 1), 6, 8},
      {BHWC(16, 1, 1, 1), 7, 8},
      {BHWC(1, 1, 1, 16), 8, 9}};
  ObjectsAssignment<BHWC> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
  EXPECT_THAT(
      assignment.object_sizes,
      ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
                  BHWC(1, 1, 2, 8), BHWC(1, 1, 8, 2), BHWC(1, 1, 2, 8),
                  BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1), BHWC(1, 1, 1, 16)));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 1, 3, 0, 4, 5, 2));
  EXPECT_THAT(
      assignment.object_sizes,
      ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
                  BHWC(1, 1, 8, 2), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1)));
}
TEST(Model, UInt2Records) {
  // 2-D (uint2) tensor sizes: the multidimensional greedy-in-order strategy
  // can reuse objects whose extents cover later tensors.
  std::vector<TensorUsageRecord<uint2>> usage_records{
      {uint2(2, 8), 0, 1},
      {uint2(2, 8), 1, 2},
      {uint2(1, 12), 2, 4},
      {uint2(2, 8), 3, 5},
      {uint2(8, 2), 4, 5},
      {uint2(2, 8), 5, 7},
      {uint2(1, 8), 6, 8},
      {uint2(2, 8), 7, 8},
      {uint2(4, 1), 8, 9}};
  ObjectsAssignment<uint2> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(2, 8),
                          uint2(8, 2), uint2(2, 8), uint2(1, 8), uint2(2, 8),
                          uint2(4, 1)));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 4, 0, 5));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2),
                          uint2(1, 8), uint2(4, 1)));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 2, 0, 3));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2)));
}
TEST(Model, UInt3Records) {
  // 3-D (uint3) tensor sizes, exercising the same three strategies as the
  // uint2 test above.
  std::vector<TensorUsageRecord<uint3>> usage_records{
      {uint3(1, 2, 8), 0, 1},
      {uint3(4, 3, 2), 1, 2},
      {uint3(1, 1, 1), 2, 4},
      {uint3(2, 4, 1), 3, 5},
      {uint3(2, 2, 2), 4, 5},
      {uint3(8, 1, 2), 5, 7},
      {uint3(1, 2, 1), 6, 8},
      {uint3(1, 1, 1), 7, 8},
      {uint3(2, 2, 2), 8, 9}};
  ObjectsAssignment<uint3> assignment;
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
                          uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
                          uint3(1, 2, 1), uint3(1, 1, 1), uint3(2, 2, 2)));
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 2, 4));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
                          uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
                          uint3(1, 2, 1)));
  ASSERT_TRUE(AssignObjectsToTensors(
                  usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 1, 3, 2, 0, 1));
  EXPECT_THAT(assignment.object_sizes,
              ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(2, 4, 1),
                          uint3(8, 1, 2)));
}
TEST(Model, OffsetAssignmentWithAlignment) {
  // With a 128-byte base-address alignment, the second shared region starts
  // at 128 instead of 64, growing the arena from 96 to 160 bytes.
  std::vector<TensorUsageRecord<size_t>> usage_records{
      {16, 0, 1},
      {8, 1, 2},
      {64, 2, 3},
      {32, 3, 4},
      {8, 4, 5},
  };
  OffsetsAssignment offsets_assignment;
  ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
                                     MemoryStrategy::GREEDY_BY_SIZE,
                                     &offsets_assignment,
                                     128)
                  .ok());
  EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 128, 0, 128, 0));
  EXPECT_EQ(offsets_assignment.total_size, 160);
}
}
}
} | template <>
// Specialization for BHWC-shaped tensor sizes; only the naive and equality
// strategies are supported for this size type.
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<BHWC>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
    const UsageGraph* reallocation_graph) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignmentWithHash(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
  return absl::OkStatus();
} | TEST(Model, BHWCRecords) {
  // Each record is {BHWC size, first_task, last_task}. Several records share
  // element counts but differ in shape, so EQUALITY must match exact shapes.
  std::vector<TensorUsageRecord<BHWC>> usage_records{
      {BHWC(1, 1, 2, 8), 0, 1},
      {BHWC(1, 1, 2, 8), 1, 2},
      {BHWC(1, 1, 1, 16), 2, 4},
      {BHWC(1, 1, 2, 8), 3, 5},
      {BHWC(1, 1, 8, 2), 4, 5},
      {BHWC(1, 1, 2, 8), 5, 7},
      {BHWC(1, 16, 1, 1), 6, 8},
      {BHWC(16, 1, 1, 1), 7, 8},
      {BHWC(1, 1, 1, 16), 8, 9}};
  ObjectsAssignment<BHWC> assignment;
  // NAIVE: one object per record.
  ASSERT_TRUE(
      AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
          .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
  EXPECT_THAT(
      assignment.object_sizes,
      ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
                  BHWC(1, 1, 2, 8), BHWC(1, 1, 8, 2), BHWC(1, 1, 2, 8),
                  BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1), BHWC(1, 1, 1, 16)));
  // EQUALITY: identically-shaped records with disjoint lifetimes share
  // objects (records 3, 5 reuse 1, 0; record 8 reuses object 2).
  ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
                                     &assignment)
                  .ok());
  EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 1, 3, 0, 4, 5, 2));
  EXPECT_THAT(
      assignment.object_sizes,
      ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
                  BHWC(1, 1, 8, 2), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1)));
} |
#ifndef TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#include <stddef.h>
#include <functional>
#include <iosfwd>
#include <ostream>
#include <unordered_map>
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
// Forward declaration; the full template is defined below.
template <typename IntTypeName, typename _ValueType>
class IntType;
// Declares a new strong int type: a unique tag struct plus a typedef of
// IntType instantiated on that tag, so two types defined with this macro
// never implicitly convert to each other even with the same value_type.
#define TSL_LIB_GTL_DEFINE_INT_TYPE(int_type_name, value_type) \
  struct int_type_name##_tag_ {};                              \
  typedef ::tsl::gtl::IntType<int_type_name##_tag_, value_type> int_type_name;
// A type-safe wrapper around an integral ValueType. The IntTypeName tag
// makes each instantiation a distinct type; arithmetic/comparison against
// the raw ValueType is allowed (see operators below), but mixing two
// different IntType instantiations is a compile error.
// Note: the class is declared TF_PACKED, so its size equals
// sizeof(ValueType) even inside packed aggregates.
template <typename IntTypeName, typename _ValueType>
class IntType {
 public:
  typedef _ValueType ValueType;
  typedef IntType<IntTypeName, ValueType> ThisType;
  // Functor for use as the Hash template argument of unordered containers.
  struct Hasher {
    size_t operator()(const IntType& arg) const {
      return static_cast<size_t>(arg.value());
    }
  };
  // Absl hashing support: hashes exactly like the underlying value.
  template <typename H>
  friend H AbslHashValue(H h, const IntType& i) {
    return H::combine(std::move(h), i.value());
  }

 public:
  // Default-constructs to zero; explicit construction from the raw value.
  constexpr IntType() : value_(0) {}
  constexpr explicit IntType(ValueType value) : value_(value) {}
  // Accessor for the raw value.
  constexpr ValueType value() const { return value_; }
  // Accessor with an explicit static_cast to another arithmetic type.
  template <typename ValType>
  constexpr ValType value() const {
    return static_cast<ValType>(value_);
  }
  // Pre/post increment and decrement, mirroring the built-in semantics.
  ThisType& operator++() {
    ++value_;
    return *this;
  }
  const ThisType operator++(int v) {
    ThisType temp(*this);
    ++value_;
    return temp;
  }
  ThisType& operator--() {
    --value_;
    return *this;
  }
  const ThisType operator--(int v) {
    ThisType temp(*this);
    --value_;
    return temp;
  }
  // Unary operators forward to the wrapped value.
  constexpr bool operator!() const { return value_ == 0; }
  constexpr const ThisType operator+() const { return ThisType(value_); }
  constexpr const ThisType operator-() const { return ThisType(-value_); }
  constexpr const ThisType operator~() const { return ThisType(~value_); }
// Compound assignment from either another IntType of the same kind or the
// raw ValueType.
#define INT_TYPE_ASSIGNMENT_OP(op)                   \
  ThisType& operator op(const ThisType& arg_value) { \
    value_ op arg_value.value();                     \
    return *this;                                    \
  }                                                  \
  ThisType& operator op(ValueType arg_value) {       \
    value_ op arg_value;                             \
    return *this;                                    \
  }
  INT_TYPE_ASSIGNMENT_OP(+=);
  INT_TYPE_ASSIGNMENT_OP(-=);
  INT_TYPE_ASSIGNMENT_OP(*=);
  INT_TYPE_ASSIGNMENT_OP(/=);
  INT_TYPE_ASSIGNMENT_OP(<<=);
  INT_TYPE_ASSIGNMENT_OP(>>=);
  INT_TYPE_ASSIGNMENT_OP(%=);
#undef INT_TYPE_ASSIGNMENT_OP
  // Assignment from the raw value (construction stays explicit).
  ThisType& operator=(ValueType arg_value) {
    value_ = arg_value;
    return *this;
  }

 private:
  ValueType value_;
  static_assert(std::is_integral<ValueType>::value, "invalid integer type");
} TF_PACKED;
// Streams the wrapped value, so a strong int prints exactly like its
// underlying ValueType.
template <typename IntTypeName, typename ValueType>
std::ostream& operator<<(std::ostream& os,
                         IntType<IntTypeName, ValueType> arg) {
  os << arg.value();
  return os;
}
// Generates the three overloads of a binary arithmetic operator:
// IntType op IntType, IntType op raw value, raw value op IntType.
// Each returns a new IntType of the same kind.
#define INT_TYPE_ARITHMETIC_OP(op)                                        \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      IntType<IntTypeName, ValueType> id_1,                               \
      IntType<IntTypeName, ValueType> id_2) {                             \
    return IntType<IntTypeName, ValueType>(id_1.value() op id_2.value()); \
  }                                                                       \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      IntType<IntTypeName, ValueType> id,                                 \
      typename IntType<IntTypeName, ValueType>::ValueType arg_val) {      \
    return IntType<IntTypeName, ValueType>(id.value() op arg_val);        \
  }                                                                       \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      typename IntType<IntTypeName, ValueType>::ValueType arg_val,        \
      IntType<IntTypeName, ValueType> id) {                               \
    return IntType<IntTypeName, ValueType>(arg_val op id.value());        \
  }
INT_TYPE_ARITHMETIC_OP(+);
INT_TYPE_ARITHMETIC_OP(-);
INT_TYPE_ARITHMETIC_OP(*);
INT_TYPE_ARITHMETIC_OP(/);
INT_TYPE_ARITHMETIC_OP(<<);
INT_TYPE_ARITHMETIC_OP(>>);
INT_TYPE_ARITHMETIC_OP(%);
#undef INT_TYPE_ARITHMETIC_OP
// Generates the three overloads of a comparison operator (IntType vs
// IntType, IntType vs raw value, raw value vs IntType), returning bool.
#define INT_TYPE_COMPARISON_OP(op)                               \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      IntType<IntTypeName, ValueType> id_1,                      \
      IntType<IntTypeName, ValueType> id_2) {                    \
    return id_1.value() op id_2.value();                         \
  }                                                              \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      IntType<IntTypeName, ValueType> id,                        \
      typename IntType<IntTypeName, ValueType>::ValueType val) { \
    return id.value() op val;                                    \
  }                                                              \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      typename IntType<IntTypeName, ValueType>::ValueType val,   \
      IntType<IntTypeName, ValueType> id) {                      \
    return val op id.value();                                    \
  }
INT_TYPE_COMPARISON_OP(==);
INT_TYPE_COMPARISON_OP(!=);
INT_TYPE_COMPARISON_OP(<);
INT_TYPE_COMPARISON_OP(<=);
INT_TYPE_COMPARISON_OP(>);
INT_TYPE_COMPARISON_OP(>=);
#undef INT_TYPE_COMPARISON_OP
}
}
#endif | #include "tsl/lib/gtl/int_type.h"
#include <memory>
#include <unordered_map>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
// One strong int type per integral width under test.
TSL_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64_t);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
TSL_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long);
// Typed-test fixture; each test body runs once per type in the list below.
template <typename IntType_Type>
class IntTypeTest : public ::testing::Test {};
// Note: UInt8_IT/UInt16_IT are listed, but UInt32_IT is defined above and
// not included in this list.
typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
                         Int64_IT, UInt64_IT, Long_IT>
    SupportedIntTypes;
TYPED_TEST_SUITE(IntTypeTest, SupportedIntTypes);
// Default construction yields zero; explicit and copy construction work in
// constexpr contexts.
TYPED_TEST(IntTypeTest, TestInitialization) {
  constexpr TypeParam a;
  constexpr TypeParam b(1);
  constexpr TypeParam c(b);
  EXPECT_EQ(0, a);
  EXPECT_EQ(1, b);
  EXPECT_EQ(1, c);
}
// Exercises the full operator surface of IntType: increment/decrement,
// unary, compound assignment (IntType and raw operands), comparisons, and
// the three overload forms of every binary operator. constexpr coverage is
// checked via static_assert on d and e.
TYPED_TEST(IntTypeTest, TestOperators) {
  TypeParam a(0);
  TypeParam b(1);
  TypeParam c(2);
  constexpr TypeParam d(3);
  constexpr TypeParam e(4);
  // Pre/post increment and decrement.
  EXPECT_EQ(0, (a++).value());
  EXPECT_EQ(2, (++a).value());
  EXPECT_EQ(2, (a--).value());
  EXPECT_EQ(0, (--a).value());
  // Unary operators: !, +, -, ~.
  EXPECT_EQ(true, !a);
  EXPECT_EQ(false, !b);
  static_assert(!d == false, "Unary operator! failed");
  EXPECT_EQ(a.value(), +a);
  static_assert(+d == d.value(), "Unary operator+ failed");
  EXPECT_EQ(-a.value(), -a);
  static_assert(-d == -d.value(), "Unary operator- failed");
  EXPECT_EQ(~a.value(), ~a);
  EXPECT_EQ(~b.value(), ~b);
  static_assert(~d == ~d.value(), "Unary operator~ failed");
  // Assignment chains from IntType and from a raw value.
  c = a = b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = b = 2;
  EXPECT_EQ(2, b.value());
  EXPECT_EQ(2, c.value());
  // Compound assignment with an IntType right-hand side.
  c = a += b;
  EXPECT_EQ(3, a.value());
  EXPECT_EQ(3, c.value());
  c = a -= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a *= b;
  EXPECT_EQ(2, a.value());
  EXPECT_EQ(2, c.value());
  c = a /= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a <<= b;
  EXPECT_EQ(4, a.value());
  EXPECT_EQ(4, c.value());
  c = a >>= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a %= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  // Compound assignment with a raw ValueType right-hand side.
  c = a += 2;
  EXPECT_EQ(3, a.value());
  EXPECT_EQ(3, c.value());
  c = a -= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a *= 2;
  EXPECT_EQ(2, a.value());
  EXPECT_EQ(2, c.value());
  c = a /= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a <<= 2;
  EXPECT_EQ(4, a.value());
  EXPECT_EQ(4, c.value());
  c = a >>= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a %= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  // Comparison operators, all three overload forms each.
  a = 0;
  b = 1;
  EXPECT_FALSE(a == b);
  EXPECT_TRUE(a == 0);
  EXPECT_FALSE(1 == a);
  static_assert(d == d, "operator== failed");
  static_assert(d == 3, "operator== failed");
  static_assert(3 == d, "operator== failed");
  EXPECT_TRUE(a != b);
  EXPECT_TRUE(a != 1);
  EXPECT_FALSE(0 != a);
  static_assert(d != e, "operator!= failed");
  static_assert(d != 4, "operator!= failed");
  static_assert(4 != d, "operator!= failed");
  EXPECT_TRUE(a < b);
  EXPECT_TRUE(a < 1);
  EXPECT_FALSE(0 < a);
  static_assert(d < e, "operator< failed");
  static_assert(d < 4, "operator< failed");
  static_assert(3 < e, "operator< failed");
  EXPECT_TRUE(a <= b);
  EXPECT_TRUE(a <= 1);
  EXPECT_TRUE(0 <= a);
  static_assert(d <= e, "operator<= failed");
  static_assert(d <= 4, "operator<= failed");
  static_assert(3 <= e, "operator<= failed");
  EXPECT_FALSE(a > b);
  EXPECT_FALSE(a > 1);
  EXPECT_FALSE(0 > a);
  static_assert(e > d, "operator> failed");
  static_assert(e > 3, "operator> failed");
  static_assert(4 > d, "operator> failed");
  EXPECT_FALSE(a >= b);
  EXPECT_FALSE(a >= 1);
  EXPECT_TRUE(0 >= a);
  static_assert(e >= d, "operator>= failed");
  static_assert(e >= 3, "operator>= failed");
  static_assert(4 >= d, "operator>= failed");
  // Binary arithmetic operators, all three overload forms each.
  a = 1;
  b = 3;
  EXPECT_EQ(4, (a + b).value());
  EXPECT_EQ(4, (a + 3).value());
  EXPECT_EQ(4, (1 + b).value());
  static_assert((d + e).value() == 7, "Binary operator+ failed");
  static_assert((d + 4).value() == 7, "Binary operator+ failed");
  static_assert((3 + e).value() == 7, "Binary operator+ failed");
  EXPECT_EQ(2, (b - a).value());
  EXPECT_EQ(2, (b - 1).value());
  EXPECT_EQ(2, (3 - a).value());
  static_assert((e - d).value() == 1, "Binary operator- failed");
  static_assert((e - 3).value() == 1, "Binary operator- failed");
  static_assert((4 - d).value() == 1, "Binary operator- failed");
  EXPECT_EQ(3, (a * b).value());
  EXPECT_EQ(3, (a * 3).value());
  EXPECT_EQ(3, (1 * b).value());
  static_assert((d * e).value() == 12, "Binary operator* failed");
  static_assert((d * 4).value() == 12, "Binary operator* failed");
  static_assert((3 * e).value() == 12, "Binary operator* failed");
  EXPECT_EQ(0, (a / b).value());
  EXPECT_EQ(0, (a / 3).value());
  EXPECT_EQ(0, (1 / b).value());
  static_assert((d / e).value() == 0, "Binary operator/ failed");
  static_assert((d / 4).value() == 0, "Binary operator/ failed");
  static_assert((3 / e).value() == 0, "Binary operator/ failed");
  EXPECT_EQ(8, (a << b).value());
  EXPECT_EQ(8, (a << 3).value());
  EXPECT_EQ(8, (1 << b).value());
  static_assert((d << e).value() == 48, "Binary operator<< failed");
  static_assert((d << 4).value() == 48, "Binary operator<< failed");
  static_assert((3 << e).value() == 48, "Binary operator<< failed");
  b = 8;
  EXPECT_EQ(4, (b >> a).value());
  EXPECT_EQ(4, (b >> 1).value());
  EXPECT_EQ(4, (8 >> a).value());
  static_assert((d >> e).value() == 0, "Binary operator>> failed");
  static_assert((d >> 4).value() == 0, "Binary operator>> failed");
  static_assert((3 >> e).value() == 0, "Binary operator>> failed");
  b = 3;
  a = 2;
  EXPECT_EQ(1, (b % a).value());
  EXPECT_EQ(1, (b % 2).value());
  EXPECT_EQ(1, (3 % a).value());
  static_assert((e % d).value() == 1, "Binary operator% failed");
  static_assert((e % 3).value() == 1, "Binary operator% failed");
  static_assert((4 % d).value() == 1, "Binary operator% failed");
}
// IntType::Hasher works as an unordered_map hash, and equal values hash
// equally.
TYPED_TEST(IntTypeTest, TestHashFunctor) {
  std::unordered_map<TypeParam, char, typename TypeParam::Hasher> map;
  TypeParam a(0);
  map[a] = 'c';
  EXPECT_EQ('c', map[a]);
  map[++a] = 'o';
  EXPECT_EQ('o', map[a]);
  TypeParam b(a);
  EXPECT_EQ(typename TypeParam::Hasher()(a), typename TypeParam::Hasher()(b));
}
// value() returns the raw value; value<T>() casts it to T, matching a
// static_cast for every supported target width. Both are constexpr.
TYPED_TEST(IntTypeTest, TestValueAccessor) {
  constexpr typename TypeParam::ValueType i = -1;
  constexpr TypeParam int_type(i);
  EXPECT_EQ(i, int_type.value());
  static_assert(int_type.value() == i, "value() failed");
  EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
  EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
  EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
  EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
  EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
  EXPECT_EQ(static_cast<int64_t>(i), int_type.template value<int64_t>());
  EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
  EXPECT_EQ(static_cast<long>(i), int_type.template value<long>());
  static_assert(int_type.template value<int>() == static_cast<int>(i),
                "value<Value>() failed");
}
// IntType is movable: a struct holding an IntType next to a move-only
// member (unique_ptr) can be returned and move-assigned.
TYPED_TEST(IntTypeTest, TestMove) {
  struct NotCopyable {
    TypeParam inttype;
    std::unique_ptr<int> ptr;
    static NotCopyable Make(int i) {
      NotCopyable f;
      f.inttype = TypeParam(i);
      f.ptr.reset(new int(i));
      return f;
    }
  };
  // Return by value forces a move of the whole struct.
  NotCopyable foo = NotCopyable::Make(123);
  EXPECT_EQ(123, foo.inttype);
  EXPECT_EQ(123, *foo.ptr);
  // Move assignment.
  foo = NotCopyable::Make(321);
  EXPECT_EQ(321, foo.inttype);
  EXPECT_EQ(321, *foo.ptr);
}
} | constexpr const ThisType operator-() const { return ThisType(-value_); } | TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
} |
#include "tensorflow/core/data/snapshot_utils.h"
#include <algorithm>
#include <climits>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/io/zlib_outputbuffer.h"
#include "tensorflow/core/platform/coding.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
// Keys used when (de)serializing snapshot dataset attributes and iterator
// checkpoint state.
constexpr const char* const kOutputTypes = "output_types";
constexpr const char* const kOutputShapes = "output_shapes";
constexpr const char* const kCompression = "compression";
constexpr const char* const kVersion = "version";
constexpr const char* const kCurrentCheckpointID = "current_checkpoint_id";
constexpr const char* const kIndex = "index";
constexpr const char* const kStartIndex = "start_index";
// Builds the error text for a failed TensorProto serialization, appending
// a size hint when the proto exceeds protobuf's 2GB (INT_MAX) limit.
std::string ProtoSerializationErrorMessage(const TensorProto& proto,
                                           const std::string& output_file) {
  const auto byte_count = proto.ByteSizeLong();
  const bool exceeds_proto_limit = byte_count > INT_MAX;
  return absl::StrCat(
      "Failed to serialize tensor proto of ", byte_count,
      " bytes to file: ", output_file,
      exceeds_proto_limit ? ": exceeded maximum protobuf size of 2GB." : "");
}
}
// Out-of-line definitions of the static constexpr members (needed when the
// constants are odr-used, e.g. passed by reference).
constexpr const int64_t
    CustomReader::kSnappyReaderInputBufferSizeBytes;
constexpr const int64_t
    CustomReader::kSnappyReaderOutputBufferSizeBytes;
// Returns <path>/<hash>, with the hash rendered as an unsigned decimal.
std::string HashDirectory(const std::string& path, uint64 hash) {
  const std::string hash_component =
      strings::Printf("%llu", static_cast<unsigned long long>(hash));
  return io::JoinPath(path, hash_component);
}
std::string RunDirectory(const std::string& hash_directory, uint64 run_id) {
return RunDirectory(
hash_directory,
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
}
// Layout: <hash_directory>/<run_id>.
std::string RunDirectory(const std::string& hash_directory,
                         const std::string& run_id) {
  std::string run_directory = io::JoinPath(hash_directory, run_id);
  return run_directory;
}
// Layout: <run_directory>/<zero-padded shard id><kShardDirectorySuffix>.
std::string ShardDirectory(const std::string& run_directory, int64_t shard_id) {
  const std::string shard_component =
      strings::Printf("%08llu%s", static_cast<unsigned long long>(shard_id),
                      kShardDirectorySuffix);
  return io::JoinPath(run_directory, shard_component);
}
// Layout: <shard_directory>/<zero-padded checkpoint id>.snapshot.
std::string GetCheckpointFileName(const std::string& shard_directory,
                                  uint64 checkpoint_id) {
  const std::string checkpoint_component = strings::Printf(
      "%08llu.snapshot", static_cast<unsigned long long>(checkpoint_id));
  return io::JoinPath(shard_directory, checkpoint_component);
}
// Factory: instantiates the writer implementation for `version` (1 ->
// CustomWriter, 2 -> TFRecordWriter) and initializes it. Returns
// InvalidArgument for any other version; `*out_writer` is set only on a
// recognized version.
Status Writer::Create(Env* env, const std::string& filename,
                      const std::string& compression_type, int version,
                      const DataTypeVector& dtypes,
                      std::unique_ptr<Writer>* out_writer) {
  if (version == 1) {
    *out_writer =
        std::make_unique<CustomWriter>(filename, compression_type, dtypes);
  } else if (version == 2) {
    *out_writer = std::make_unique<TFRecordWriter>(filename, compression_type);
  } else {
    return errors::InvalidArgument("Snapshot writer version: ", version,
                                   " is not supported.");
  }
  return (*out_writer)->Initialize(env);
}
// The constructor only records parameters; the file is opened in
// Initialize().
TFRecordWriter::TFRecordWriter(const std::string& filename,
                               const std::string& compression_type)
    : filename_(filename), compression_type_(compression_type) {}
// Opens the destination file for appending and wraps it in a RecordWriter
// configured for compression_type_.
Status TFRecordWriter::Initialize(tensorflow::Env* env) {
  TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
  record_writer_ = std::make_unique<io::RecordWriter>(
      dest_.get(), io::RecordWriterOptions::CreateRecordWriterOptions(
                       compression_type_));
  return absl::OkStatus();
}
// Serializes each tensor to a TensorProto and writes it as one TFRecord.
// With TF_CORD_SUPPORT the serialized string is wrapped in a Cord whose
// releaser deletes the heap buffer, avoiding a copy into the record writer.
Status TFRecordWriter::WriteTensors(const std::vector<Tensor>& tensors) {
  for (const auto& tensor : tensors) {
    TensorProto proto;
    tensor.AsProtoTensorContent(&proto);
#if defined(TF_CORD_SUPPORT)
    // Ownership of proto_buffer passes to the Cord's releaser lambda.
    auto* proto_buffer = new std::string();
    if (!proto.SerializeToString(proto_buffer)) {
      delete proto_buffer;
      return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
    }
    absl::Cord proto_serialized = absl::MakeCordFromExternal(
        *proto_buffer,
        [proto_buffer](absl::string_view) { delete proto_buffer; });
    TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#else
    std::string proto_serialized;
    if (!proto.SerializeToString(&proto_serialized)) {
      return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
    }
    TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#endif
  }
  return absl::OkStatus();
}
// Flushes the record writer's buffer, then the underlying file.
Status TFRecordWriter::Sync() {
  TF_RETURN_IF_ERROR(record_writer_->Flush());
  return dest_->Flush();
}
// Idempotent close: flush, close the record writer, then the file. The
// members are nulled only after all steps succeed, so a failed Close can
// be retried (and is retried by the destructor).
Status TFRecordWriter::Close() {
  if (record_writer_ != nullptr) {
    TF_RETURN_IF_ERROR(Sync());
    TF_RETURN_IF_ERROR(record_writer_->Close());
    TF_RETURN_IF_ERROR(dest_->Close());
    record_writer_ = nullptr;
    dest_ = nullptr;
  }
  return absl::OkStatus();
}
// Best-effort close; destructors must not propagate errors, so failures
// are only logged.
TFRecordWriter::~TFRecordWriter() {
  Status s = Close();
  if (!s.ok()) {
    LOG(ERROR) << "Failed to close snapshot file " << filename_ << ": " << s;
  }
}
// The constructor only records parameters; the file is opened in
// Initialize().
CustomWriter::CustomWriter(const std::string& filename,
                           const std::string& compression_type,
                           const DataTypeVector& dtypes)
    : filename_(filename),
      compression_type_(compression_type),
      dtypes_(dtypes) {}
// Opens the output file, optionally layers GZIP compression on top of it,
// and precomputes which dtypes can be written by raw memcpy ("simple")
// versus full proto serialization ("complex").
Status CustomWriter::Initialize(tensorflow::Env* env) {
  TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
#if defined(IS_SLIM_BUILD)
  if (compression_type_ != io::compression::kNone) {
    LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
               << "off compression.";
  }
#else
  if (compression_type_ == io::compression::kGzip) {
    // The raw file moves to zlib_underlying_dest_; dest_ becomes the
    // compressing wrapper, which takes ownership via reset().
    zlib_underlying_dest_.swap(dest_);
    io::ZlibCompressionOptions zlib_options;
    zlib_options = io::ZlibCompressionOptions::GZIP();
    io::ZlibOutputBuffer* zlib_output_buffer = new io::ZlibOutputBuffer(
        zlib_underlying_dest_.get(), zlib_options.input_buffer_size,
        zlib_options.output_buffer_size, zlib_options);
    TF_CHECK_OK(zlib_output_buffer->Init());
    dest_.reset(zlib_output_buffer);
  }
#endif
  simple_tensor_mask_.reserve(dtypes_.size());
  for (const auto& dtype : dtypes_) {
    if (DataTypeCanUseMemcpy(dtype)) {
      simple_tensor_mask_.push_back(true);
      num_simple_++;
    } else {
      simple_tensor_mask_.push_back(false);
      num_complex_++;
    }
  }
  return absl::OkStatus();
}
// Writes one row of tensors. Non-snappy modes serialize everything into a
// single SnapshotRecord proto. Snappy mode writes two records: a metadata
// proto (shapes + per-tensor byte sizes) followed by one snappy-compressed
// blob holding all tensor bytes, memcpy'd for "simple" dtypes and
// proto-serialized for the rest.
Status CustomWriter::WriteTensors(const std::vector<Tensor>& tensors) {
  if (compression_type_ != io::compression::kSnappy) {
    experimental::SnapshotRecord record;
    for (const auto& tensor : tensors) {
      TensorProto* t = record.add_tensor();
      tensor.AsProtoTensorContent(t);
    }
#if defined(TF_CORD_SUPPORT)
    // Buffer ownership passes to the Cord's releaser lambda.
    auto record_buffer = new std::string();
    record.SerializeToString(record_buffer);
    absl::Cord record_serialized = absl::MakeCordFromExternal(
        *record_buffer,
        [record_buffer](absl::string_view) { delete record_buffer; });
    return WriteRecord(record_serialized);
#else
    return WriteRecord(record.SerializeAsString());
#endif
  }
  // Pass 1: collect buffers/protos and record each tensor's byte size in
  // the metadata, accumulating the total blob size.
  std::vector<const TensorBuffer*> tensor_buffers;
  tensor_buffers.reserve(num_simple_);
  std::vector<TensorProto> tensor_protos;
  tensor_protos.reserve(num_complex_);
  experimental::SnapshotTensorMetadata metadata;
  int64_t total_size = 0;
  for (int i = 0, end = tensors.size(); i < end; ++i) {
    const Tensor& tensor = tensors[i];
    experimental::TensorMetadata* tensor_metadata =
        metadata.add_tensor_metadata();
    tensor.shape().AsProto(tensor_metadata->mutable_tensor_shape());
    int64_t size = 0;
    if (simple_tensor_mask_[i]) {
      auto tensor_buffer = DMAHelper::buffer(&tensor);
      tensor_buffers.push_back(tensor_buffer);
      size = tensor_buffer->size();
    } else {
      TensorProto proto;
      tensor.AsProtoTensorContent(&proto);
      size = proto.ByteSizeLong();
      tensor_protos.push_back(std::move(proto));
    }
    tensor_metadata->set_tensor_size_bytes(size);
    total_size += size;
  }
  // Pass 2: lay the tensors out back-to-back in one flat buffer, consuming
  // the recorded sizes in order.
  std::vector<char> uncompressed(total_size);
  char* position = uncompressed.data();
  int buffer_index = 0;
  int proto_index = 0;
  for (int i = 0, end = tensors.size(); i < end; ++i) {
    const auto& tensor_metadata = metadata.tensor_metadata(i);
    if (simple_tensor_mask_[i]) {
      memcpy(position, tensor_buffers[buffer_index]->data(),
             tensor_metadata.tensor_size_bytes());
      buffer_index++;
    } else {
      tensor_protos[proto_index].SerializeToArray(
          position, tensor_metadata.tensor_size_bytes());
      proto_index++;
    }
    position += tensor_metadata.tensor_size_bytes();
  }
  DCHECK_EQ(position, uncompressed.data() + total_size);
  string output;
  if (!tsl::port::Snappy_Compress(uncompressed.data(), total_size, &output)) {
    return errors::Internal("Failed to compress using snappy.");
  }
#if defined(TF_CORD_SUPPORT)
  auto metadata_buffer = new std::string();
  metadata.SerializeToString(metadata_buffer);
  absl::Cord metadata_serialized = absl::MakeCordFromExternal(
      *metadata_buffer,
      [metadata_buffer](absl::string_view) { delete metadata_buffer; });
#else
  std::string metadata_serialized = metadata.SerializeAsString();
#endif
  // Metadata record first, then the compressed payload.
  TF_RETURN_IF_ERROR(WriteRecord(metadata_serialized));
  TF_RETURN_IF_ERROR(WriteRecord(output));
  return absl::OkStatus();
}
// Delegates to the (possibly compressing) output file.
Status CustomWriter::Sync() { return dest_->Sync(); }
// Idempotent close: the compressing wrapper (dest_) is closed before the
// raw file it wraps (zlib_underlying_dest_, set only in GZIP mode).
Status CustomWriter::Close() {
  if (dest_ != nullptr) {
    TF_RETURN_IF_ERROR(dest_->Close());
    dest_ = nullptr;
  }
  if (zlib_underlying_dest_ != nullptr) {
    TF_RETURN_IF_ERROR(zlib_underlying_dest_->Close());
    zlib_underlying_dest_ = nullptr;
  }
  return absl::OkStatus();
}
// Best-effort close; failures are only logged in a destructor.
CustomWriter::~CustomWriter() {
  Status s = Close();
  if (!s.ok()) {
    LOG(ERROR) << "Could not finish writing file: " << s;
  }
}
// Record framing: a fixed 64-bit little-endian length header followed by
// the payload bytes.
Status CustomWriter::WriteRecord(const StringPiece& data) {
  char header[kHeaderSize];
  core::EncodeFixed64(header, data.size());
  TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
  return dest_->Append(data);
}
#if defined(TF_CORD_SUPPORT)
// Cord overload with identical framing; avoids flattening the Cord.
Status CustomWriter::WriteRecord(const absl::Cord& data) {
  char header[kHeaderSize];
  core::EncodeFixed64(header, data.size());
  TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
  return dest_->Append(data);
}
#endif
// Factory: instantiates the reader implementation for `version` (0 and 1
// -> CustomReader, 2 -> TFRecordReader) and initializes it. Returns
// InvalidArgument for any other version.
Status Reader::Create(Env* env, const std::string& filename,
                      const string& compression_type, int version,
                      const DataTypeVector& dtypes,
                      std::unique_ptr<Reader>* out_reader) {
  if (version == 0 || version == 1) {
    *out_reader = std::make_unique<CustomReader>(filename, compression_type,
                                                 version, dtypes);
  } else if (version == 2) {
    *out_reader =
        std::make_unique<TFRecordReader>(filename, compression_type, dtypes);
  } else {
    return errors::InvalidArgument("Snapshot reader version: ", version,
                                   " is not supported.");
  }
  return (*out_reader)->Initialize(env);
}
// Reads and discards `num_records` records, stopping at the first error.
//
// Fix: the loop counter must be int64_t to match `num_records`; the
// previous `int` counter overflowed (undefined behavior) for counts above
// INT_MAX.
Status Reader::SkipRecords(int64_t num_records) {
  for (int64_t i = 0; i < num_records; ++i) {
    std::vector<Tensor> unused_tensors;
    TF_RETURN_IF_ERROR(ReadTensors(&unused_tensors));
  }
  return absl::OkStatus();
}
class Reader::Dataset : public DatasetBase {
public:
  // Dataset over the checkpoint files of one shard directory, replayed in
  // checkpoint-id order starting at `start_index` within the first file.
  Dataset(DatasetContext&& ctx, const std::string& shard_dir,
          const std::string& compression, const int64_t version,
          const DataTypeVector& dtypes,
          const std::vector<PartialTensorShape>& shapes,
          const int64_t start_index)
      : DatasetBase(std::move(ctx)),
        shard_dir_(shard_dir),
        compression_(compression),
        version_(version),
        dtypes_(dtypes),
        shapes_(shapes),
        start_index_(start_index) {}
  const DataTypeVector& output_dtypes() const override { return dtypes_; }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return shapes_;
  }
  std::string DebugString() const override { return "SnapshotDatasetReader"; }
  // A source dataset: no inputs, no external state.
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    return absl::OkStatus();
  }
  Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
  // Serializes the dataset as a graph node: shard_dir and start_index
  // become scalar inputs; compression and version become attrs.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** node) const override {
    Node* shard_dir = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(shard_dir_, &shard_dir));
    Node* start_index = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(start_index_, &start_index));
    AttrValue compression;
    b->BuildAttrValue(compression_, &compression);
    AttrValue version;
    b->BuildAttrValue(version_, &version);
    return b->AddDataset(
        this,
        {std::make_pair(0, shard_dir), std::make_pair(1, start_index)},
        {},
        {{kCompression, compression}, {kVersion, version}},
        true, node);
  }
  // Creates the iterator that walks this shard's checkpoint files.
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(node_name(), prefix)});
  }
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
start_index_(dataset()->start_index_) {}
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
protected:
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
Status s = reader_->ReadTensors(out_tensors);
if (!absl::IsOutOfRange(s)) {
start_index_++;
return s;
}
Status status = AdvanceToNextFile(ctx->env());
if (absl::IsNotFound(status)) {
*end_of_sequence = true;
return absl::OkStatus();
}
return status;
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kCurrentCheckpointID),
current_checkpoint_id_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kStartIndex), start_index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointID),
¤t_checkpoint_id_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kStartIndex), &start_index_));
TF_RETURN_IF_ERROR(ctx->env()->FileExists(GetCurrentFilename()));
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
private:
Status AdvanceToNextFile(Env* env) {
start_index_ = 0;
current_checkpoint_id_++;
TF_RETURN_IF_ERROR(env->FileExists(GetCurrentFilename()));
return Reader::Create(env, GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_);
}
std::string GetCurrentFilename() {
return GetCheckpointFileName(dataset()->shard_dir_,
current_checkpoint_id_);
}
Status AdvanceToStartIndex(IteratorContext* ctx) {
for (int64_t i = 0; i < start_index_; ++i) {
std::vector<Tensor> unused;
TF_RETURN_IF_ERROR(reader_->ReadTensors(&unused));
}
return absl::OkStatus();
}
std::unique_ptr<Reader> reader_;
int64_t current_checkpoint_id_ = 0;
int64_t start_index_;
};
const tstring shard_dir_;
const std::string compression_;
const int64_t version_;
const DataTypeVector dtypes_;
const std::vector<PartialTensorShape> shapes_;
const int64_t start_index_;
};
// Reads the reader op's static attributes (output dtypes/shapes, compression
// scheme, and snapshot format version) at kernel-construction time.
Reader::DatasetOp::DatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kVersion, &version_));
}
// Builds a Reader::Dataset from the runtime inputs `shard_dir` and
// `start_index` plus the attributes captured at kernel construction.
void Reader::DatasetOp::MakeDataset(OpKernelContext* ctx,
                                    DatasetBase** output) {
  tstring shard_dir;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "shard_dir", &shard_dir));
  int64_t start_index;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "start_index", &start_index));
  *output =
      new Reader::Dataset(DatasetContext(ctx), shard_dir, compression_,
                          version_, output_types_, output_shapes_, start_index);
}
// A dataset whose elements are DT_VARIANT scalars, each wrapping one of the
// `datasets` supplied at construction. It exposes a list of per-shard reader
// datasets as a single "dataset of datasets".
class Reader::NestedDataset : public DatasetBase {
 public:
  explicit NestedDataset(DatasetContext&& ctx,
                         std::vector<DatasetBase*> datasets)
      : DatasetBase(std::move(ctx)), datasets_(datasets) {
    // Every element is a DT_VARIANT tensor with partial shape [1].
    dtypes_.push_back(DT_VARIANT);
    gtl::InlinedVector<int64_t, 1> element_dim_sizes;
    element_dim_sizes.push_back(1);
    partial_shapes_.emplace_back(element_dim_sizes);
  }

  const DataTypeVector& output_dtypes() const override { return dtypes_; }

  const std::vector<PartialTensorShape>& output_shapes() const override {
    return partial_shapes_;
  }

  std::string DebugString() const override {
    return "SnapshotNestedDatasetReader";
  }

  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    // The wrapped datasets are serialized as graph inputs (see
    // AsGraphDefInternal) but are intentionally not reported here.
    inputs->clear();
    return absl::OkStatus();
  }

  Status CheckExternalState() const override { return absl::OkStatus(); }

 protected:
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** node) const override {
    // Serialize each wrapped dataset and feed them all as one list input.
    std::vector<Node*> input_graph_nodes;
    input_graph_nodes.reserve(datasets_.size());
    for (const auto& dataset : datasets_) {
      Node* input_node;
      TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, dataset, &input_node));
      input_graph_nodes.emplace_back(input_node);
    }
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {},
                      {std::make_pair(0, input_graph_nodes)},
                      {}, node));
    return absl::OkStatus();
  }

  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(node_name(), prefix)});
  }

 private:
  std::vector<DatasetBase*> datasets_;
  DataTypeVector dtypes_;
  std::vector<PartialTensorShape> partial_shapes_;

  // Yields one variant tensor per wrapped dataset, in order.
  class Iterator : public DatasetIterator<NestedDataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<NestedDataset>(params) {}

   protected:
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      const int64_t num_datasets = dataset()->datasets_.size();
      *end_of_sequence = num_datasets == index_;
      if (!*end_of_sequence) {
        // Wrap the next dataset in a scalar DT_VARIANT tensor.
        Tensor tensor(DT_VARIANT, TensorShape({}));
        TF_RETURN_IF_ERROR(
            StoreDatasetInVariantTensor(dataset()->datasets_[index_], &tensor));
        out_tensors->clear();
        out_tensors->push_back(std::move(tensor));
        index_++;
      }
      return absl::OkStatus();
    }

    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kIndex), index_));
      return absl::OkStatus();
    }

    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kIndex), &index_));
      return absl::OkStatus();
    }

   private:
    // Index of the next wrapped dataset to emit.
    int64_t index_ = 0;
  };
};
// Reads the nested-reader op's output dtype/shape attributes at
// kernel-construction time.
Reader::NestedDatasetOp::NestedDatasetOp(OpKernelConstruction* ctx)
    : DatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
// Unpacks each DT_VARIANT input back into a DatasetBase* and wraps them all
// in a NestedDataset.
void Reader::NestedDatasetOp::MakeDataset(OpKernelContext* ctx,
                                          DatasetBase** output) {
  std::vector<DatasetBase*> inputs;
  // NOTE(review): `i` is size_t while num_inputs() presumably returns a
  // signed count; the comparison mixes signedness but is harmless for
  // non-negative counts — confirm against OpKernelContext.
  for (size_t i = 0; i < ctx->num_inputs(); ++i) {
    DatasetBase* input;
    OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
    inputs.push_back(input);
  }
  *output = new Reader::NestedDataset(DatasetContext(ctx), inputs);
  // NOTE(review): the status/result of Initialize is discarded here.
  (*output)->Initialize({});
}
// Builds a nested dataset containing one reader dataset per shard directory.
// `start_index` is a global element index: it is split into per-shard start
// indices (elements are distributed round-robin across shards) and the shard
// datasets are rotated so iteration resumes at the shard holding element
// `start_index`.
Status Reader::MakeNestedDataset(Env* env,
                                 const std::vector<std::string>& shard_dirs,
                                 const string& compression_type, int version,
                                 const DataTypeVector& dtypes,
                                 const std::vector<PartialTensorShape>& shapes,
                                 const int64_t start_index,
                                 DatasetBase** output) {
  std::vector<DatasetBase*> datasets;
  datasets.reserve(shard_dirs.size());
  for (int64_t i = 0; i < shard_dirs.size(); ++i) {
    // Each shard starts at start_index / num_shards; the first
    // (start_index % num_shards) shards skip one extra element.
    // (datasets.size() equals `i` at this point.)
    int64_t dataset_start_index = start_index / shard_dirs.size();
    if (start_index % shard_dirs.size() > datasets.size()) {
      dataset_start_index++;
    }
    datasets.push_back(
        new Dataset(DatasetContext(DatasetContext::Params(
                        {"SnapshotDatasetReader",
                         strings::StrCat("SnapshotDatasetReader/_", i)})),
                    shard_dirs.at(i), compression_type, version, dtypes, shapes,
                    dataset_start_index));
    datasets.back()->Initialize({});
  }
  if (!shard_dirs.empty()) {
    // Rotate so the first dataset iterated is the shard that holds the
    // element at the global `start_index`.
    std::rotate(datasets.begin(),
                datasets.begin() + (start_index % shard_dirs.size()),
                datasets.end());
  }
  MakeNestedDataset(datasets, output);
  return absl::OkStatus();
}
// Wraps `datasets` in a NestedDataset so they can be consumed as a single
// dataset of DT_VARIANT elements.
void Reader::MakeNestedDataset(const std::vector<DatasetBase*>& datasets,
                               DatasetBase** output) {
  DatasetContext::Params params(
      {"SnapshotNestedDatasetReader", "SnapshotNestedDatasetReader"});
  auto* nested = new NestedDataset(DatasetContext(std::move(params)), datasets);
  nested->Initialize({});
  *output = nested;
}
// Stores the reader configuration; no I/O happens until Initialize().
TFRecordReaderImpl::TFRecordReaderImpl(
    const std::string& filename, const string& compression,
    std::optional<int64_t> output_buffer_size)
    : filename_(filename),
      offset_(0),
      bytes_read_(0),
      compression_(compression),
      output_buffer_size_(output_buffer_size) {}
// Opens `filename_` and constructs a RecordReader configured for the
// requested compression. Must be called before GetNext()/GetTensors().
Status TFRecordReaderImpl::Initialize(Env* env) {
  TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
  auto options = io::RecordReaderOptions::CreateRecordReaderOptions(
      compression_);
#if !defined(IS_SLIM_BUILD)
  // Buffer-size overrides are only applied when compression support is
  // compiled in (not in slim/mobile builds).
  if (output_buffer_size_.has_value()) {
    options.snappy_options.output_buffer_size = *output_buffer_size_;
    options.zlib_options.output_buffer_size = *output_buffer_size_;
  }
#endif
  record_reader_ = std::make_unique<io::RecordReader>(file_.get(), options);
  bytes_read_ = 0;
  return absl::OkStatus();
}
// Reads the next record and parses it into a Tensor. Propagates the
// OutOfRange status from ReadRecord at end of file.
absl::StatusOr<Tensor> TFRecordReaderImpl::GetNext() {
  tstring record;
  TF_RETURN_IF_ERROR(record_reader_->ReadRecord(&offset_, &record));
  bytes_read_ += record.size();
  return Parse(record);
}
// Reads every remaining tensor from the file. OutOfRange (end of file) is
// treated as successful termination; any other error is propagated.
absl::StatusOr<std::vector<Tensor>> TFRecordReaderImpl::GetTensors() {
  std::vector<Tensor> tensors;
  while (true) {
    absl::StatusOr<Tensor> tensor = GetNext();
    if (absl::IsOutOfRange(tensor.status())) {
      return tensors;
    }
    TF_RETURN_IF_ERROR(tensor.status());
    tensors.push_back(std::move(*tensor));
  }
  // Unreachable: the loop only exits through the returns above.
  return tensors;
}
// Deserializes one record payload (a serialized TensorProto) into a Tensor,
// returning DataLoss with file/offset context when either parse step fails.
absl::StatusOr<Tensor> TFRecordReaderImpl::Parse(const tstring& record) {
  TensorProto proto;
  if (!proto.ParseFromArray(record.data(), record.size())) {
    return errors::DataLoss(
        "Unable to parse tensor from stored proto in file: ", filename_,
        ", record ", offset_, ". Serialized proto: ", record);
  }
  Tensor tensor;
  if (!tensor.FromProto(proto)) {
    return errors::DataLoss(
        "Unable to parse tensor from stored proto in file: ", filename_,
        ", record ", offset_, ". TensorProto: ", proto.ShortDebugString());
  }
  return tensor;
}
// Reads one element: one tensor per configured component dtype, in order.
// Replaces any previous contents of `read_tensors`.
Status TFRecordReader::ReadTensors(std::vector<Tensor>* read_tensors) {
  read_tensors->clear();
  const size_t num_components = dtypes_.size();
  read_tensors->reserve(num_components);
  for (size_t component = 0; component < num_components; ++component) {
    TF_ASSIGN_OR_RETURN(Tensor next, reader_impl_.GetNext());
    read_tensors->push_back(std::move(next));
  }
  return absl::OkStatus();
}
// Stores the reader configuration; the input streams are built lazily in
// Initialize().
CustomReader::CustomReader(const std::string& filename,
                           const string& compression_type, const int version,
                           const DataTypeVector& dtypes)
    : filename_(filename),
      compression_type_(compression_type),
      version_(version),
      dtypes_(dtypes) {}
// Opens `filename_`, layers the appropriate decompression stream on top of
// the raw file, and precomputes which component dtypes can be memcpy'd
// directly ("simple") versus those that must round-trip through TensorProto
// ("complex").
Status CustomReader::Initialize(Env* env) {
  TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
  input_stream_ = std::make_unique<io::RandomAccessInputStream>(file_.get());
#if defined(IS_SLIM_BUILD)
  // Slim/mobile builds ship without compression libraries; fall back to the
  // raw stream and warn.
  if (compression_type_ != io::compression::kNone) {
    LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
               << "off compression.";
  }
#else
  if (compression_type_ == io::compression::kGzip) {
    io::ZlibCompressionOptions zlib_options;
    zlib_options = io::ZlibCompressionOptions::GZIP();
    input_stream_ = std::make_unique<io::ZlibInputStream>(
        input_stream_.release(), zlib_options.input_buffer_size,
        zlib_options.output_buffer_size, zlib_options, true);
  } else if (compression_type_ == io::compression::kSnappy) {
    if (version_ == 0) {
      // Legacy (v0) format: the whole stream is snappy-compressed.
      input_stream_ = std::make_unique<tsl::io::SnappyInputBuffer>(
          file_.get(), kSnappyReaderInputBufferSizeBytes,
          kSnappyReaderOutputBufferSizeBytes);
    } else {
      // Later versions compress blocks individually; read raw bytes through
      // a large (64 MB) buffered stream instead.
      input_stream_ =
          std::make_unique<io::BufferedInputStream>(file_.get(), 64 << 20);
    }
  }
#endif
  simple_tensor_mask_.reserve(dtypes_.size());
  for (const auto& dtype : dtypes_) {
    if (DataTypeCanUseMemcpy(dtype)) {
      simple_tensor_mask_.push_back(true);
      num_simple_++;
    } else {
      simple_tensor_mask_.push_back(false);
      num_complex_++;
    }
  }
  return absl::OkStatus();
}
Status CustomReader::ReadTensors(std::vector<Tensor>* read_tensors) {
profiler::TraceMe activity(
[&]() { return absl::StrCat(kClassName, kSeparator, "ReadTensors"); },
profiler::TraceMeLevel::kInfo);
if (version_ == 0 || compression_type_ != io::compression::kSnappy) {
return ReadTensorsV0(read_tensors);
}
if (version_ != 1) {
return errors::InvalidArgument("Version: ", version_, " is not supported.");
}
if (compression_type_ != io::compression::kSnappy) {
return errors::InvalidArgument("Compression ", compression_type_,
" is not supported.");
}
experimental::SnapshotTensorMetadata metadata;
tstring metadata_str;
TF_RETURN_IF_ERROR(ReadRecord(&metadata_str));
if (!metadata.ParseFromArray(metadata_str.data(), metadata_str.size())) {
return errors::DataLoss("Could not parse SnapshotTensorMetadata");
}
read_tensors->reserve(metadata.tensor_metadata_size());
std::vector<Tensor> simple_tensors;
simple_tensors.reserve(num_simple_);
std::vector<std::pair<std::unique_ptr<char[]>, size_t>> tensor_proto_strs;
tensor_proto_strs.reserve(num_complex_);
TF_RETURN_IF_ERROR(
SnappyUncompress(&metadata, &simple_tensors, &tensor_proto_strs));
int simple_index = 0;
int complex_index = 0;
for (int i = 0, end = simple_tensor_mask_.size(); i < end; ++i) {
if (simple_tensor_mask_[i]) {
read_tensors->push_back(std::move(simple_tensors[simple_index]));
simple_index++;
} else {
auto tensor_proto_str = std::move(tensor_proto_strs[complex_index].first);
size_t tensor_proto_size = tensor_proto_strs[complex_index].second;
TensorProto tp;
if (!tp.ParseFromArray(tensor_proto_str.get(), tensor_proto_size)) {
return errors::Internal("Could not parse TensorProto");
}
Tensor t;
if (!t.FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
read_tensors->push_back(std::move( | #include "tensorflow/core/data/snapshot_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/compression.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::LocalTempFilename;
// Fills `dtypes`/`tensors` with ten identical 1 KB string scalars used as
// test data throughout this file.
// NOTE(review): `Tensor t(tensor_data.data())` presumably invokes Tensor's
// scalar-string convenience constructor via char* — confirm against the
// Tensor API.
void GenerateTensorVector(tensorflow::DataTypeVector& dtypes,
                          std::vector<Tensor>& tensors) {
  std::string tensor_data(1024, 'a');
  for (int i = 0; i < 10; ++i) {
    Tensor t(tensor_data.data());
    dtypes.push_back(t.dtype());
    tensors.push_back(t);
  }
}
// Writes 100 batches of test tensors to a temporary snapshot file using the
// given compression/version, reads them back, and verifies that each tensor
// round-trips to a byte-identical serialized TensorProto.
void SnapshotRoundTrip(std::string compression_type, int version) {
  std::vector<Tensor> tensors;
  tensorflow::DataTypeVector dtypes;
  GenerateTensorVector(dtypes, tensors);
  std::string filename;
  EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
  std::unique_ptr<Writer> writer;
  TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
                              compression_type, version, dtypes, &writer));
  for (int i = 0; i < 100; ++i) {
    TF_ASSERT_OK(writer->WriteTensors(tensors));
  }
  TF_ASSERT_OK(writer->Close());
  std::unique_ptr<Reader> reader;
  TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
                              version, dtypes, &reader));
  for (int i = 0; i < 100; ++i) {
    std::vector<Tensor> read_tensors;
    TF_ASSERT_OK(reader->ReadTensors(&read_tensors));
    EXPECT_EQ(tensors.size(), read_tensors.size());
    for (int j = 0; j < read_tensors.size(); ++j) {
      // Compare serialized protos rather than Tensor objects directly.
      TensorProto proto;
      TensorProto read_proto;
      tensors[j].AsProtoTensorContent(&proto);
      read_tensors[j].AsProtoTensorContent(&read_proto);
      std::string proto_serialized, read_proto_serialized;
      proto.AppendToString(&proto_serialized);
      read_proto.AppendToString(&read_proto_serialized);
      EXPECT_EQ(proto_serialized, read_proto_serialized);
    }
  }
  TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
// Round-trips every supported (compression, version) combination.
TEST(SnapshotUtilTest, CombinationRoundTripTest) {
  SnapshotRoundTrip(io::compression::kNone, 1);
  SnapshotRoundTrip(io::compression::kGzip, 1);
  SnapshotRoundTrip(io::compression::kSnappy, 1);
  SnapshotRoundTrip(io::compression::kNone, 2);
  SnapshotRoundTrip(io::compression::kGzip, 2);
  SnapshotRoundTrip(io::compression::kSnappy, 2);
}
// Writing then reading a snapshot metadata file yields an identical proto.
TEST(SnapshotUtilTest, MetadataFileRoundTrip) {
  experimental::DistributedSnapshotMetadata metadata_in;
  metadata_in.set_compression(io::compression::kGzip);
  std::string dir = LocalTempFilename();
  TF_ASSERT_OK(WriteMetadataFile(Env::Default(), dir, &metadata_in));
  experimental::DistributedSnapshotMetadata metadata_out;
  bool file_exists;
  TF_ASSERT_OK(
      ReadMetadataFile(Env::Default(), dir, &metadata_out, &file_exists));
  EXPECT_THAT(metadata_in, EqualsProto(metadata_out));
}
// Reading a non-existent metadata file succeeds but reports
// file_exists == false.
TEST(SnapshotUtilTest, MetadataFileDoesntExist) {
  experimental::DistributedSnapshotMetadata metadata;
  bool file_exists;
  TF_ASSERT_OK(ReadMetadataFile(Env::Default(), LocalTempFilename(), &metadata,
                                &file_exists));
  EXPECT_FALSE(file_exists);
}
// Benchmark helper: writes one element per benchmark iteration to a temp
// snapshot file, then reads one element per iteration back, for the given
// compression/version. Write and read statuses are intentionally ignored
// inside the timed loops.
void SnapshotReaderBenchmarkLoop(::testing::benchmark::State& state,
                                 std::string compression_type, int version) {
  tensorflow::DataTypeVector dtypes;
  std::vector<Tensor> tensors;
  GenerateTensorVector(dtypes, tensors);
  std::string filename;
  EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
  std::unique_ptr<Writer> writer;
  TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
                              compression_type, version, dtypes, &writer));
  for (auto s : state) {
    writer->WriteTensors(tensors).IgnoreError();
  }
  TF_ASSERT_OK(writer->Close());
  std::unique_ptr<Reader> reader;
  TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
                              version, dtypes, &reader));
  for (auto s : state) {
    std::vector<Tensor> read_tensors;
    reader->ReadTensors(&read_tensors).IgnoreError();
  }
  TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
// Per-configuration reader benchmarks (version 1 = custom snapshot format,
// version 2 = TFRecord format).
// NOTE(review): unlike the writer benchmarks in this file, there is no
// TFRecord/snappy reader benchmark here — confirm whether that is
// intentional.
void SnapshotCustomReaderNoneBenchmark(::testing::benchmark::State& state) {
  SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomReaderGzipBenchmark(::testing::benchmark::State& state) {
  SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomReaderSnappyBenchmark(::testing::benchmark::State& state) {
  SnapshotReaderBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordReaderNoneBenchmark(::testing::benchmark::State& state) {
  SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordReaderGzipBenchmark(::testing::benchmark::State& state) {
  SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 2);
}
BENCHMARK(SnapshotCustomReaderNoneBenchmark);
BENCHMARK(SnapshotCustomReaderGzipBenchmark);
BENCHMARK(SnapshotCustomReaderSnappyBenchmark);
BENCHMARK(SnapshotTFRecordReaderNoneBenchmark);
BENCHMARK(SnapshotTFRecordReaderGzipBenchmark);
// Benchmark helper: measures WriteTensors throughput for the given
// compression/version; the read path is not exercised.
void SnapshotWriterBenchmarkLoop(::testing::benchmark::State& state,
                                 std::string compression_type, int version) {
  tensorflow::DataTypeVector dtypes;
  std::vector<Tensor> tensors;
  GenerateTensorVector(dtypes, tensors);
  std::string filename;
  EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
  std::unique_ptr<Writer> writer;
  TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
                              compression_type, version, dtypes, &writer));
  for (auto s : state) {
    writer->WriteTensors(tensors).IgnoreError();
  }
  writer->Close().IgnoreError();
  TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
// Per-configuration writer benchmarks (version 1 = custom snapshot format,
// version 2 = TFRecord format).
void SnapshotCustomWriterNoneBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomWriterGzipBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomWriterSnappyBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordWriterNoneBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordWriterGzipBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 2);
}
void SnapshotTFRecordWriterSnappyBenchmark(::testing::benchmark::State& state) {
  SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 2);
}
BENCHMARK(SnapshotCustomWriterNoneBenchmark);
BENCHMARK(SnapshotCustomWriterGzipBenchmark);
BENCHMARK(SnapshotCustomWriterSnappyBenchmark);
BENCHMARK(SnapshotTFRecordWriterNoneBenchmark);
BENCHMARK(SnapshotTFRecordWriterGzipBenchmark);
BENCHMARK(SnapshotTFRecordWriterSnappyBenchmark);
}
}
}
} | #endif
// Instantiates the reader implementation matching the snapshot format
// `version` (0/1 = custom format, 2 = TFRecord) and initializes it.
// Returns InvalidArgument for unknown versions.
Status Reader::Create(Env* env, const std::string& filename,
                      const string& compression_type, int version,
                      const DataTypeVector& dtypes,
                      std::unique_ptr<Reader>* out_reader) {
  if (version == 0 || version == 1) {
    *out_reader = std::make_unique<CustomReader>(filename, compression_type,
                                                 version, dtypes);
  } else if (version == 2) {
    *out_reader =
        std::make_unique<TFRecordReader>(filename, compression_type, dtypes);
  } else {
    return errors::InvalidArgument("Snapshot reader version: ", version,
                                   " is not supported.");
  }
  return (*out_reader)->Initialize(env);
}
SnapshotRoundTrip(io::compression::kNone, 1);
SnapshotRoundTrip(io::compression::kGzip, 1);
SnapshotRoundTrip(io::compression::kSnappy, 1);
SnapshotRoundTrip(io::compression::kNone, 2);
SnapshotRoundTrip(io::compression::kGzip, 2);
SnapshotRoundTrip(io::compression::kSnappy, 2);
} |
#ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#include <utility>
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
/// Smart pointer to `TransformRep` with deep-copy-on-copy semantics: copying
/// the pointer allocates a fresh `TransformRep` and copies the contents,
/// while moving transfers ownership of the existing allocation.
class DeepCopyTransformRepPtr {
 public:
  /// Constructs a null pointer.
  DeepCopyTransformRepPtr(std::nullptr_t = nullptr) : ptr_(nullptr) {}

  /// Adopts sole ownership of `ptr` without copying. The caller must hold
  /// the only reference (or a zero-capacity rep), as asserted below.
  explicit DeepCopyTransformRepPtr(TransformRep* ptr,
                                   internal::adopt_object_ref_t)
      : ptr_(ptr) {
    assert(ptr == nullptr ||
           (ptr->input_rank_capacity == 0 && ptr->output_rank_capacity == 0) ||
           ptr->reference_count == 1);
  }

  /// Deep-copies `ptr` (which may be nullptr); the source is left untouched.
  explicit DeepCopyTransformRepPtr(TransformRep* ptr,
                                   internal::acquire_object_ref_t) {
    if (ptr) {
      ptr_ =
          TransformRep::Allocate(ptr->input_rank, ptr->output_rank).release();
      CopyTransformRep(ptr, ptr_);
    } else {
      ptr_ = nullptr;
    }
  }

  /// Move construction transfers ownership and leaves `other` null.
  DeepCopyTransformRepPtr(DeepCopyTransformRepPtr&& other)
      : ptr_(std::exchange(other.ptr_, nullptr)) {}

  /// Copy construction deep-copies the pointee.
  DeepCopyTransformRepPtr(const DeepCopyTransformRepPtr& other)
      : DeepCopyTransformRepPtr(other.ptr_, internal::acquire_object_ref) {}

  DeepCopyTransformRepPtr& operator=(DeepCopyTransformRepPtr&& other) {
    // Guard against self-move: without the check we would free `ptr_` and
    // then reinstall the now-dangling pointer, causing a double free when
    // the destructor runs.
    if (this != &other) {
      if (ptr_) Free();
      ptr_ = std::exchange(other.ptr_, nullptr);
    }
    return *this;
  }

  DeepCopyTransformRepPtr& operator=(const DeepCopyTransformRepPtr& other) {
    // Copy-and-move: the temporary deep copy makes this self-assignment
    // safe as well.
    return *this = DeepCopyTransformRepPtr(other.ptr_,
                                           internal::acquire_object_ref);
  }

  DeepCopyTransformRepPtr& operator=(std::nullptr_t) {
    if (ptr_) Free();
    ptr_ = nullptr;
    return *this;
  }

  ~DeepCopyTransformRepPtr() {
    if (ptr_) Free();
  }

  explicit operator bool() const { return static_cast<bool>(ptr_); }
  TransformRep* get() const { return ptr_; }
  TransformRep* operator->() const { return ptr_; }
  TransformRep& operator*() const { return *ptr_; }

  /// Relinquishes ownership and returns the raw pointer (now owned by the
  /// caller); this pointer becomes null.
  TransformRep* release() { return std::exchange(ptr_, nullptr); }

 private:
  // Releases `ptr_` by handing it to a temporary owning TransformRep::Ptr.
  void Free() {
    TransformRep::Ptr<>(ptr_, internal::adopt_object_ref);
  }

  TransformRep* ptr_;
};
}
}
#endif | #include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal_index_space::DeepCopyTransformRepPtr;
using ::tensorstore::internal_index_space::TransformRep;
// Default-constructed pointer is null and all accessors reflect that.
TEST(DeepCopyTransformRepPtr, DefaultConstruct) {
  DeepCopyTransformRepPtr ptr;
  EXPECT_FALSE(ptr);
  EXPECT_EQ(nullptr, ptr.get());
  EXPECT_EQ(nullptr, ptr.operator->());
  EXPECT_EQ(nullptr, ptr.release());
}
// Construction from nullptr behaves exactly like default construction.
TEST(DeepCopyTransformRepPtr, Nullptr) {
  DeepCopyTransformRepPtr ptr = nullptr;
  EXPECT_FALSE(ptr);
  EXPECT_EQ(nullptr, ptr.get());
  EXPECT_EQ(nullptr, ptr.operator->());
  EXPECT_EQ(nullptr, ptr.release());
}
// Adopting an existing allocation does not copy: the same raw pointer is
// held afterwards.
TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  EXPECT_EQ(ptr, ptr2.operator->());
  EXPECT_EQ(ptr, &*ptr2);
}
// Adoption also works for the zero-rank (zero-capacity) representation.
TEST(DeepCopyTransformRepPtr, AdoptAllocateZero) {
  auto ptr1 = TransformRep::Allocate(0, 0);
  ptr1->input_rank = ptr1->output_rank = 0;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  EXPECT_EQ(ptr, ptr2.operator->());
  EXPECT_EQ(ptr, &*ptr2);
}
// Acquiring deep-copies: distinct pointer, same contents.
TEST(DeepCopyTransformRepPtr, AcquireAllocate) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  ptr1->input_origin()[0] = 7;
  DeepCopyTransformRepPtr ptr2(ptr1.get(), acquire_object_ref);
  EXPECT_NE(ptr1.get(), ptr2.get());
  EXPECT_EQ(7, ptr2->input_origin()[0]);
}
// release() relinquishes ownership and returns the raw pointer.
TEST(DeepCopyTransformRepPtr, Release) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  auto ptr3 = ptr2.release();
  EXPECT_EQ(ptr, ptr3);
  // Re-adopt the pointer so the allocation is freed at scope exit.
  TransformRep::Ptr<>(ptr3, adopt_object_ref);
}
// Move construction transfers the raw pointer and empties the source.
TEST(DeepCopyTransformRepPtr, MoveConstruct) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  auto ptr3 = std::move(ptr2);
  EXPECT_EQ(ptr, ptr3.get());
  EXPECT_FALSE(ptr2);
}
// Copy construction deep-copies; both pointers stay valid and independent.
TEST(DeepCopyTransformRepPtr, CopyConstruct) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  ptr1->input_origin()[0] = 7;
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  auto ptr3 = ptr2;
  EXPECT_NE(ptr, ptr3.get());
  EXPECT_TRUE(ptr2);
  EXPECT_TRUE(ptr3);
  EXPECT_EQ(7, ptr3->input_origin()[0]);
}
// Assigning nullptr frees the held allocation and nulls the pointer.
TEST(DeepCopyTransformRepPtr, AssignNullptr) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  ptr2 = nullptr;
  EXPECT_EQ(nullptr, ptr2.get());
}
// Move assignment into a null pointer transfers ownership.
TEST(DeepCopyTransformRepPtr, MoveAssignNonNullToNull) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  DeepCopyTransformRepPtr ptr3;
  ptr3 = std::move(ptr2);
  EXPECT_EQ(ptr, ptr3.get());
  EXPECT_FALSE(ptr2);
}
// Move-assigning a null pointer over a non-null one frees the target.
TEST(DeepCopyTransformRepPtr, MoveAssignNullToNonNull) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  DeepCopyTransformRepPtr ptr3;
  ptr2 = std::move(ptr3);
  EXPECT_FALSE(ptr2);
  EXPECT_FALSE(ptr3);
}
// Copy assignment into a null pointer deep-copies; the source is unchanged.
TEST(DeepCopyTransformRepPtr, CopyAssignNonNullToNull) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  ptr1->input_origin()[0] = 7;
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  DeepCopyTransformRepPtr ptr3;
  ptr3 = ptr2;
  EXPECT_TRUE(ptr2);
  EXPECT_EQ(ptr, ptr2.get());
  EXPECT_NE(ptr, ptr3.get());
  EXPECT_EQ(7, ptr3->input_origin()[0]);
}
// Copy-assigning a null pointer over a non-null one frees the target.
TEST(DeepCopyTransformRepPtr, CopyAssignNullToNonNull) {
  auto ptr1 = TransformRep::Allocate(1, 1);
  ptr1->input_rank = ptr1->output_rank = 1;
  auto ptr = ptr1.get();
  ptr1->input_origin()[0] = 7;
  DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
  EXPECT_EQ(ptr, ptr2.get());
  DeepCopyTransformRepPtr ptr3;
  ptr2 = ptr3;
  EXPECT_FALSE(ptr2);
  EXPECT_FALSE(ptr3);
}
} | TransformRep* operator->() const { return ptr_; } | TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
|
#include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
// Builds a GrapplerFunctionItem from an instantiated function: moves the
// function body into `graph`, stamps the producer version, and derives the
// item's feeds (inputs), fetches (outputs) and keep_ops (control outputs)
// from the instantiated argument/control-output lists.
GrapplerFunctionItem::GrapplerFunctionItem(
    string func_name, string description, AttrSlice func_attr,
    std::vector<const FunctionDef::ArgAttrs*> arg_attr,
    std::vector<InputArgInstantiation> input_args,
    std::vector<OutputArgInstantiation> output_args,
    std::vector<ControlOutput> control_outputs, const int graph_def_version,
    const bool is_stateful, GraphDef&& function_body)
    : description_(std::move(description)),
      func_attr_(func_attr),
      arg_attr_(std::move(arg_attr)),
      input_args_(std::move(input_args)),
      output_args_(std::move(output_args)),
      control_outputs_(std::move(control_outputs)),
      is_stateful_(is_stateful) {
  id = std::move(func_name);
  graph = std::move(function_body);
  graph.mutable_versions()->set_producer(graph_def_version);
  // Feed tensor values are unknown at this point; only node names are set.
  for (const InputArgInstantiation& input_arg : input_args_) {
    feed.push_back({input_arg.node_name, Tensor()});
  }
  for (const OutputArgInstantiation& output_arg : output_args_) {
    fetch.push_back(output_arg.node_name);
  }
  for (const ControlOutput& control_output : control_outputs_) {
    keep_ops.push_back(control_output.node_name);
  }
  // Stateful/dataset ops inside a function body may exist purely for their
  // side effects; do not allow the optimizer to prune them.
  optimization_options().allow_pruning_stateful_and_dataset_ops = false;
}
// Trivial accessors for GrapplerFunctionItem.
const string& GrapplerFunctionItem::description() const { return description_; }
const std::vector<InputArgInstantiation>& GrapplerFunctionItem::inputs() const {
  return input_args_;
}
const InputArgInstantiation& GrapplerFunctionItem::input(int i) const {
  return input_args_[i];
}
// NOTE(review): top-level `const` on a by-value return type has no effect;
// kept to match the declaration in the header.
const std::size_t GrapplerFunctionItem::input_size() const {
  return input_args_.size();
}
const std::vector<OutputArgInstantiation>& GrapplerFunctionItem::outputs()
    const {
  return output_args_;
}
const OutputArgInstantiation& GrapplerFunctionItem::output(int i) const {
  return output_args_[i];
}
const std::size_t GrapplerFunctionItem::output_size() const {
  return output_args_.size();
}
const std::vector<ControlOutput>& GrapplerFunctionItem::control_outputs()
    const {
  return control_outputs_;
}
const std::size_t GrapplerFunctionItem::control_output_size() const {
  return control_outputs_.size();
}
const AttrSlice& GrapplerFunctionItem::func_attr() const { return func_attr_; }
const std::vector<const FunctionDef::ArgAttrs*>&
GrapplerFunctionItem::arg_attr() const {
  return arg_attr_;
}
const GraphDef& GrapplerFunctionItem::function_body() const { return graph; }
GraphDef& GrapplerFunctionItem::mutable_function_body() { return graph; }
bool GrapplerFunctionItem::is_stateful() const { return is_stateful_; }
// Replaces the function body graph with `other` and returns *this.
GrapplerFunctionItem& GrapplerFunctionItem::SwapFunctionBody(GraphDef&& other) {
  graph = std::move(other);
  return *this;
}
bool HasParametrizedType(const FunctionDef& func) {
const auto is_type_parametrized = [](const OpDef::ArgDef& arg) {
return !arg.type_attr().empty() || !arg.number_attr().empty() ||
!arg.type_list_attr().empty();
};
const auto& input = func.signature().input_arg();
const auto& output = func.signature().output_arg();
return std::any_of(input.begin(), input.end(), is_type_parametrized) ||
std::any_of(output.begin(), output.end(), is_type_parametrized);
}
bool HasParametrizedBody(const FunctionDef& func) {
const auto is_parametrized = [&](const NodeDef& node) {
for (const auto& attr : node.attr()) {
if (!attr.second.placeholder().empty()) return true;
}
return false;
};
return std::any_of(func.node_def().begin(), func.node_def().end(),
is_parametrized);
}
// A function is parametrized if either its signature or its body requires
// attribute substitution at instantiation time.
bool IsParametrized(const FunctionDef& func) {
  if (HasParametrizedType(func)) return true;
  return HasParametrizedBody(func);
}
// Resolves concrete data types for all type-parametrized arguments of `func`
// from the instantiation attributes. Results are keyed by type attribute name;
// elements of a type-list attribute are keyed as "<attr_name>:<index>".
// `type_parameters` must be empty on entry.
Status InstantiationTypeParameters(
    const FunctionDef& func, const AttrSlice& func_instantiation_attr,
    absl::flat_hash_map<string, DataType>* type_parameters) {
  if (!type_parameters->empty()) {
    return absl::InvalidArgumentError(
        "Type parameters output map must be empty");
  }
  // Resolves the type(s) of a single signature argument definition.
  const auto resolve_type_attr = [&](const OpDef::ArgDef& arg) -> Status {
    if (!arg.type_attr().empty()) {
      DataType dtype;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(func_instantiation_attr, arg.type_attr(), &dtype));
      type_parameters->emplace(arg.type_attr(), dtype);
    } else if (!arg.type_list_attr().empty()) {
      std::vector<DataType> dtypes;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(func_instantiation_attr, arg.type_list_attr(), &dtypes));
      int index = 0;
      for (const DataType& dtype : dtypes) {
        // Each list element gets its own "<attr_name>:<index>" key.
        type_parameters->emplace(absl::StrCat(arg.type_list_attr(), ":", index),
                                 dtype);
        ++index;
      }
    }
    return absl::OkStatus();
  };
  for (const auto& input : func.signature().input_arg())
    TF_RETURN_IF_ERROR(resolve_type_attr(input));
  for (const auto& output : func.signature().output_arg())
    TF_RETURN_IF_ERROR(resolve_type_attr(output));
  return absl::OkStatus();
}
// Resolves all attribute placeholders found in the function body nodes using
// the instantiation attributes. Results are collected into `body_parameters`,
// keyed by placeholder name; the map must be empty on entry. Returns an error
// for any placeholder that has no matching instantiation attribute.
Status InstantiationBodyParameters(
    const FunctionDef& func, const AttrSlice& func_instantiation_attr,
    absl::flat_hash_map<string, AttrValue>* body_parameters) {
  if (!body_parameters->empty()) {
    return absl::InvalidArgumentError(
        "Body parameters output map must be empty");
  }
  for (const NodeDef& body_node : func.node_def()) {
    for (const auto& name_and_attr : body_node.attr()) {
      const string& placeholder = name_and_attr.second.placeholder();
      // Skip attributes without placeholders, and placeholders already
      // resolved from a previously visited node.
      if (placeholder.empty()) continue;
      if (body_parameters->contains(placeholder)) continue;
      const AttrValue* resolved = func_instantiation_attr.Find(placeholder);
      if (resolved == nullptr) {
        return absl::InvalidArgumentError(
            absl::StrCat("Can't resolve placeholder: ", placeholder));
      }
      body_parameters->insert({placeholder, *resolved});
    }
  }
  return absl::OkStatus();
}
// Instantiates `func` with the given attributes into a GrapplerFunctionItem:
// builds the function body graph, collects instantiated inputs/outputs and
// (sorted) control outputs, and captures per-argument attributes. Fails if the
// function is unnamed or its signature has non-type attributes.
Status MakeGrapplerFunctionItem(const FunctionDef& func,
                                const AttrSlice& func_instantiation_attr,
                                const FunctionLibraryDefinition& flib,
                                const int graph_def_version,
                                GrapplerFunctionItem* item) {
  const OpDef& signature = func.signature();
  if (signature.name().empty()) {
    return absl::InvalidArgumentError("Function name must be specified");
  }
  // Only "type" attributes are supported; anything else cannot be resolved by
  // the instantiation machinery below.
  for (const OpDef::AttrDef& attr : signature.attr()) {
    if (attr.type() != "type") {
      return absl::InvalidArgumentError(
          "Function signature must have only type attributes");
    }
  }
  // Instantiate the function body into a Graph and convert it to a GraphDef.
  std::unique_ptr<FunctionBody> fbody;
  TF_RETURN_IF_ERROR(
      FunctionDefToBodyHelper(func, func_instantiation_attr, &flib, &fbody));
  GraphDef function_body;
  fbody->graph->ToGraphDef(&function_body);
  // Attach only the functions reachable from `func` to keep the item small.
  *function_body.mutable_library() = flib.ReachableDefinitions(func).ToProto();
  VLOG(3) << absl::Substitute(
      "Deleted $0 unreachable functions from the Grappler function item "
      "instantiation of $1 (library size = $2)",
      flib.num_functions() - function_body.library().function_size(),
      signature.name(), function_body.library().function_size());
  const int num_instantiated_inputs = fbody->arg_types.size();
  const int num_instantiated_outputs = fbody->ret_types.size();
  // Collect instantiated inputs (one per _Arg node, with resolved dtype).
  std::vector<InputArgInstantiation> inputs;
  inputs.reserve(num_instantiated_inputs);
  for (int in_id = 0; in_id < num_instantiated_inputs; ++in_id) {
    const Node* node = fbody->arg_nodes[in_id];
    const DataType& dtype = fbody->arg_types[in_id];
    inputs.emplace_back(node->name(), dtype);
  }
  // Collect instantiated outputs (one per _Retval node, with resolved dtype).
  std::vector<OutputArgInstantiation> outputs;
  outputs.reserve(num_instantiated_outputs);
  for (int out_id = 0; out_id < num_instantiated_outputs; ++out_id) {
    const Node* node = fbody->ret_nodes[out_id];
    const DataType& dtype = fbody->ret_types[out_id];
    outputs.emplace_back(node->name(), dtype);
  }
  // Control outputs are sorted for deterministic ordering.
  std::vector<ControlOutput> control_outputs;
  control_outputs.reserve(func.control_ret_size());
  for (const auto& control_ret : func.control_ret()) {
    control_outputs.push_back({control_ret.first, control_ret.second});
  }
  std::sort(control_outputs.begin(), control_outputs.end());
  // Per-input-argument attributes, indexed in parallel with `inputs`.
  std::vector<const FunctionDef::ArgAttrs*> arg_attr(inputs.size(), nullptr);
  for (const auto& attr : func.arg_attr()) {
    if (attr.first >= inputs.size()) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid attribute index, got ", attr.first,
                       " but expected less than ", inputs.size()));
    }
    arg_attr.at(attr.first) = &attr.second;
  }
  *item = GrapplerFunctionItem(
      signature.name(),
      signature.description(),
      AttrSlice(&func.attr()), std::move(arg_attr),
      std::move(inputs), std::move(outputs), std::move(control_outputs),
      graph_def_version, signature.is_stateful(), std::move(function_body));
  return absl::OkStatus();
}
// Convenience overload for functions that need no instantiation attributes
// (i.e. functions without type parameters).
Status MakeGrapplerFunctionItem(const FunctionDef& func,
                                const FunctionLibraryDefinition& flib,
                                const int graph_def_version,
                                GrapplerFunctionItem* item) {
  return MakeGrapplerFunctionItem(func, AttrSlice(), flib, graph_def_version,
                                  item);
}
// Replaces the function input at `input_index` with the given constant node:
// the _Arg node is overwritten in place by `input_const` (keeping its name,
// dropping its inputs and device), the "index" attribute of all later _Arg
// nodes is decremented, and the input is removed from the item's bookkeeping.
Status ReplaceInputWithConst(const NodeDef& input_const, int input_index,
                             GrapplerFunctionItem* item) {
  if (!IsConstant(input_const)) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Input node is not a constant: ", SummarizeNodeDef(input_const)));
  }
  const int item_input_size = item->input_size();
  if (input_index < 0 || input_index >= item_input_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Function input index is out of bound: index=", input_index,
        " input_size=", item->input_size()));
  }
  const InputArgInstantiation& input_arg = item->input(input_index);
  for (NodeDef& node : *item->graph.mutable_node()) {
    // Overwrite the matching _Arg node with the constant, preserving the node
    // name so existing data edges keep working.
    if (node.name() == input_arg.node_name) {
      node = input_const;
      node.set_name(input_arg.node_name);
      node.clear_input();
      node.clear_device();
    }
    // Renumber remaining _Arg nodes: every argument after the removed one
    // shifts down by one. (The replaced node is no longer an _Arg here.)
    if (IsArg(node)) {
      auto attrs = AttrSlice(node);
      int index;
      TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
      if (index >= input_index) {
        (*node.mutable_attr())["index"].set_i(index - 1);
      }
    }
  }
  // Keep input_args_ and arg_attr_ parallel by erasing the same slot.
  item->input_args_.erase(item->input_args_.begin() + input_index);
  item->arg_attr_.erase(item->arg_attr_.begin() + input_index);
  return absl::OkStatus();
}
// Removes the outputs whose indices are in `remove_outputs` from the item.
// Fills `output_mapping` with (old_index, new_index) pairs for every surviving
// output whose position shifted, and renumbers the "index" attribute of the
// corresponding _Retval nodes. `output_mapping` must be empty on entry.
Status RemoveFunctionOutputs(const absl::flat_hash_set<int>& remove_outputs,
                             GrapplerFunctionItem* item,
                             std::vector<std::pair<int, int>>* output_mapping) {
  DCHECK(output_mapping->empty());
  // Validate all indices up front so we fail before mutating anything.
  for (int remove_output : remove_outputs) {
    const int item_output_size = item->output_size();
    if (remove_output < 0 || remove_output >= item_output_size) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Function output index is out of bound: index=", remove_output,
          " output_size=", item->output_size()));
    }
  }
  // Identify outputs to delete by pointer identity into output_args_, and
  // compute the index shift for each surviving output.
  absl::flat_hash_set<const OutputArgInstantiation*> remove_output_args;
  const auto is_remove_output_arg = [&](const OutputArgInstantiation& output) {
    return remove_output_args.find(&output) != remove_output_args.end();
  };
  for (int i = 0, end = item->output_size(); i < end; ++i) {
    const OutputArgInstantiation& output = item->output(i);
    if (remove_outputs.contains(i)) {
      VLOG(3) << "Remove functions output: name=" << output.node_name
              << "(index = " << i << ")";
      remove_output_args.insert(&output);
    } else if (!remove_output_args.empty()) {
      // Surviving output shifts down by the number of outputs removed so far.
      output_mapping->push_back({i, i - remove_output_args.size()});
    }
  }
  // Renumber the "index" attribute of shifted _Retval nodes.
  for (NodeDef& node : *item->graph.mutable_node()) {
    if (IsRetval(node)) {
      auto attrs = AttrSlice(node);
      int index;
      TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
      for (const auto& mapping : *output_mapping) {
        const int from = mapping.first;
        const int to = mapping.second;
        if (index == from) {
          (*node.mutable_attr())["index"].set_i(to);
        }
      }
    }
  }
  // Finally drop the removed entries from output_args_.
  auto& o = item->output_args_;
  o.erase(std::remove_if(o.begin(), o.end(), is_remove_output_arg), o.end());
  return absl::OkStatus();
}
namespace {
// Helper for converting a GrapplerFunctionItem back into a FunctionDef:
// classifies body nodes as inputs/outputs and rewrites graph-def tensor ids
// ("node:pos") into function-def input format ("node:output_name:offset").
class MakeFunctionDefHelper {
 public:
  MakeFunctionDefHelper() = default;
  // Builds the lookup tables below from `item`; must be called first.
  Status Initialize(const GrapplerFunctionItem& item,
                    const FunctionLibraryDefinition& flib);
  // Converts a graph-def input string into function-def input format.
  Status AsFunctionDefInput(const string& graph_def_input,
                            string* func_def_input) const;
  // Rewrites all inputs of `function_body_node` in place.
  Status AsFunctionDefNode(NodeDef* function_body_node) const;
  bool IsInputNode(const NodeDef& node) const {
    return input_nodes_.contains(node.name());
  }
  bool IsOutputNode(const NodeDef& node) const {
    return output_nodes_.contains(node.name());
  }
 private:
  // NOTE: these string_views point into the GrapplerFunctionItem passed to
  // Initialize(), which must outlive this helper.
  absl::flat_hash_set<absl::string_view> input_nodes_;
  absl::flat_hash_set<absl::string_view> output_nodes_;
  // Node name -> (output name -> [start, end) position range) for every node
  // in the function body.
  absl::flat_hash_map<string, tensorflow::NameRangeMap> function_body_outputs_;
};
// Builds the input/output node-name sets and, for every function body node,
// the mapping from output name to its range of output positions.
// The string_view sets reference strings owned by `item`, which must outlive
// this helper.
//
// Fix: the original line contained the mojibake sequence `®istration` — an
// encoding-corrupted `&registration` — which is not valid C++; restored the
// address-of expression.
Status MakeFunctionDefHelper::Initialize(
    const GrapplerFunctionItem& item, const FunctionLibraryDefinition& flib) {
  for (const InputArgInstantiation& input_arg : item.inputs()) {
    input_nodes_.insert(input_arg.node_name);
  }
  for (const OutputArgInstantiation& output_arg : item.outputs()) {
    output_nodes_.insert(output_arg.node_name);
  }
  for (const NodeDef& node : item.function_body().node()) {
    const OpRegistrationData* registration;
    TF_RETURN_IF_ERROR(flib.LookUp(node.op(), &registration));
    tensorflow::NameRangeMap outputs_range_map;
    TF_RETURN_IF_ERROR(tensorflow::NameRangesForNode(
        node, registration->op_def, nullptr, &outputs_range_map));
    function_body_outputs_.emplace(node.name(), std::move(outputs_range_map));
  }
  return absl::OkStatus();
}
// Converts a graph-def input ("node", "node:pos", or "^node") into
// function-def input format: control inputs pass through unchanged, function
// inputs become the bare node name, and body-node outputs become
// "node:output_name:offset" based on the node's output name ranges.
Status MakeFunctionDefHelper::AsFunctionDefInput(const string& graph_def_input,
                                                 string* func_def_input) const {
  // Control inputs ("^node") keep the same syntax in a FunctionDef.
  if (IsControlInput(graph_def_input)) {
    *func_def_input = graph_def_input;
    return absl::OkStatus();
  }
  const SafeTensorId tensor = ParseTensorName(graph_def_input);
  DCHECK_GE(tensor.index(), 0);
  // Function inputs (_Arg nodes) are referenced by bare name and always have
  // exactly one output.
  const auto is_input = input_nodes_.find(tensor.node());
  if (is_input != input_nodes_.end()) {
    DCHECK_EQ(tensor.index(), 0);
    *func_def_input = tensor.node();
    return absl::OkStatus();
  }
  // Otherwise find which named output of the producing node covers the
  // requested output position and rebase the index into that range.
  const auto is_body_output = function_body_outputs_.find(tensor.node());
  if (is_body_output != function_body_outputs_.end()) {
    const tensorflow::NameRangeMap& outputs_range_map = is_body_output->second;
    for (const auto& el : outputs_range_map) {
      const auto& output_name = el.first;
      const auto& output_range = el.second;
      if (tensor.index() >= output_range.first &&
          tensor.index() < output_range.second) {
        *func_def_input = absl::StrCat(tensor.node(), ":", output_name, ":",
                                       tensor.index() - output_range.first);
        return absl::OkStatus();
      }
    }
  }
  return absl::InvalidArgumentError(
      absl::StrCat("Unknown graph def input: ", graph_def_input));
}
// Rewrites every input of `function_body_node` from graph-def tensor-id form
// into function-def input format, in place.
Status MakeFunctionDefHelper::AsFunctionDefNode(
    NodeDef* function_body_node) const {
  string rewritten_input;
  const int num_inputs = function_body_node->input_size();
  for (int idx = 0; idx < num_inputs; ++idx) {
    TF_RETURN_IF_ERROR(
        AsFunctionDefInput(function_body_node->input(idx), &rewritten_input));
    function_body_node->set_input(idx, rewritten_input);
  }
  return absl::OkStatus();
}
}
// Converts a GrapplerFunctionItem back into a FunctionDef: rebuilds the
// signature (inputs, outputs, control outputs), the ret/control_ret maps,
// function and per-argument attributes, and the body nodes (with inputs
// rewritten to function-def format). _Arg/_Retval nodes are not emitted.
Status MakeFunctionDef(const GrapplerFunctionItem& item,
                       const FunctionLibraryDefinition& flib,
                       FunctionDef* func) {
  func->mutable_signature()->set_name(item.id);
  func->mutable_signature()->set_description(item.description());
  func->mutable_signature()->set_is_stateful(item.is_stateful());
  MakeFunctionDefHelper helper;
  TF_RETURN_IF_ERROR(helper.Initialize(item, flib));
  // Map from an output node name to its single input, which is the tensor
  // that actually produces the function's return value.
  absl::flat_hash_map<absl::string_view, string> output_tensors;
  for (const NodeDef& func_body_node : item.function_body().node()) {
    if (!helper.IsOutputNode(func_body_node)) continue;
    if (func_body_node.input_size() != 1) {
      return absl::InternalError(
          absl::StrCat("_Retval node must have single input: ",
                       SummarizeNodeDef(func_body_node)));
    }
    output_tensors.emplace(func_body_node.name(), func_body_node.input(0));
  }
  for (const InputArgInstantiation& input_arg : item.inputs()) {
    OpDef::ArgDef arg_def;
    arg_def.set_name(input_arg.node_name);
    arg_def.set_type(input_arg.data_type);
    arg_def.set_is_ref(IsRefType(input_arg.data_type));
    *func->mutable_signature()->add_input_arg() = arg_def;
  }
  for (const OutputArgInstantiation& output_arg : item.outputs()) {
    // Output nodes are named "<name>_RetVal"; strip the suffix to recover the
    // signature output name.
    const string output_name =
        absl::StrReplaceAll(output_arg.node_name, {{"_RetVal", ""}});
    OpDef::ArgDef arg_def;
    arg_def.set_name(output_name);
    arg_def.set_type(output_arg.data_type);
    arg_def.set_is_ref(IsRefType(output_arg.data_type));
    *func->mutable_signature()->add_output_arg() = arg_def;
    auto it = output_tensors.find(output_arg.node_name);
    if (it == output_tensors.end()) {
      return absl::InternalError(
          absl::StrCat("Can't find an output tensor for the output node: ",
                       output_arg.node_name));
    }
    // The ret map entry must use function-def input format.
    TF_RETURN_IF_ERROR(helper.AsFunctionDefInput(
        it->second, &(*func->mutable_ret())[output_name]));
  }
  for (const ControlOutput& control_out : item.control_outputs()) {
    func->mutable_control_ret()->insert(
        {control_out.output_name, control_out.node_name});
    *func->mutable_signature()->add_control_output() = control_out.output_name;
  }
  for (const auto& attr : item.func_attr()) {
    const auto& attr_name = attr.first;
    const auto& attr_value = attr.second;
    (*func->mutable_attr())[attr_name] = attr_value;
  }
  for (int i = 0, end = item.arg_attr().size(); i < end; ++i) {
    const auto* attr = item.arg_attr().at(i);
    if (attr != nullptr) {
      (*func->mutable_arg_attr())[i] = *attr;
    }
  }
  // Copy body nodes, skipping function inputs/outputs, and rewrite their
  // inputs into function-def format.
  for (const NodeDef& func_node : item.function_body().node()) {
    if (IsArg(func_node) || IsRetval(func_node) ||
        helper.IsInputNode(func_node) || helper.IsOutputNode(func_node))
      continue;
    NodeDef* func_def_node = func->add_node_def();
    *func_def_node = func_node;
    TF_RETURN_IF_ERROR(helper.AsFunctionDefNode(func_def_node));
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
// Empty fixture; tests only need the TEST_F grouping, no shared state.
class FunctionsTest : public ::testing::Test {};
// Verifies that type-/body-parametrization detection distinguishes a function
// with "$T" placeholders from a fully concrete one.
TEST_F(FunctionsTest, IsParametrized) {
  FunctionDef parametrized_func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  FunctionDef non_parametrized_func = FunctionDefHelper::Create(
      "MyMul", {"x:float", "y:float"}, {"z:float"}, {},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
      {{"z", "output:z:0"}});
  EXPECT_TRUE(HasParametrizedType(parametrized_func));
  EXPECT_TRUE(HasParametrizedBody(parametrized_func));
  EXPECT_TRUE(IsParametrized(parametrized_func));
  EXPECT_FALSE(HasParametrizedType(non_parametrized_func));
  EXPECT_FALSE(HasParametrizedBody(non_parametrized_func));
  EXPECT_FALSE(IsParametrized(non_parametrized_func));
}
// Verifies resolution of type parameters (including "C:<index>" keys for a
// type-list attribute) and of body placeholder attributes.
TEST_F(FunctionsTest, InstantiationParameters) {
  FunctionDef func = FunctionDefHelper::Create(
      "ParametrizedFunc",
      {"input1:A", "input2:B", "input3:float", "input4: C"},
      {"output1: A", "output2:D"},
      {
          "A: {float, double}",
          "B: {float, int32}",
          "C: list(type)",
          "D: {float, double}",
      },
      {{{"output"}, "FakeOp", {"input1", "input2"}, {{"key", "$key"}}}},
      {{"x", "cx:output:0"}, {"y", "cy:output:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["key"].set_s("key-value");
  func_instantiation_attr["A"].set_type(DT_FLOAT);
  func_instantiation_attr["B"].set_type(DT_INT32);
  func_instantiation_attr["C"].mutable_list()->add_type(DT_FLOAT);
  func_instantiation_attr["C"].mutable_list()->add_type(DT_INT32);
  func_instantiation_attr["D"].set_type(DT_DOUBLE);
  absl::flat_hash_map<string, DataType> type_parameters;
  TF_EXPECT_OK(InstantiationTypeParameters(
      func, AttrSlice(&func_instantiation_attr), &type_parameters));
  ASSERT_EQ(5, type_parameters.size());
  EXPECT_EQ(DT_FLOAT, type_parameters["A"]);
  EXPECT_EQ(DT_INT32, type_parameters["B"]);
  EXPECT_EQ(DT_FLOAT, type_parameters["C:0"]);
  EXPECT_EQ(DT_INT32, type_parameters["C:1"]);
  EXPECT_EQ(DT_DOUBLE, type_parameters["D"]);
  absl::flat_hash_map<string, AttrValue> body_parameters;
  TF_EXPECT_OK(InstantiationBodyParameters(
      func, AttrSlice(&func_instantiation_attr), &body_parameters));
  ASSERT_EQ(1, body_parameters.size());
  EXPECT_EQ("key-value", body_parameters["key"].s());
}
// Instantiates XTimesTwo with T=float and checks the resulting item's inputs,
// outputs, and every node of the instantiated function body.
TEST_F(FunctionsTest, FromSimpleFunctionDef) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "XTimesTwo",
      {"x: T"},
      {"y: T"},
      {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("XTimesTwo", item.id);
  EXPECT_EQ(5, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("y_RetVal", item.output(0).node_name);
  // `count` tracks how many of the expected nodes were actually visited.
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "two" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "scale" && ++count) {
      EXPECT_EQ("Cast", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("two", node.input(0));
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("scale", node.input(1));
    } else if (node.name() == "y_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ(0, node.attr().at("index").i());
    }
  }
  EXPECT_EQ(5, count);
}
// Instantiates SubGrad, whose body contains a multi-output node
// (BroadcastGradientArgs), and checks that second outputs are referenced with
// the ":1" graph-def syntax (e.g. "rx:1").
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
  std::vector<FunctionDefHelper::Node> nodes = {
      {{"sx"}, "Shape", {"x"}},
      {{"sy"}, "Shape", {"y"}},
      {{"gx"}, "Identity", {"dz"}},
      {{"gy"}, "Neg", {"dz"}},
      {{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
      {{"sum_gx"}, "Sum", {"gx", "rx"}},
      {{"dx"}, "Reshape", {"sum_gx", "sx"}},
      {{"sum_gy"}, "Sum", {"gy", "ry"}},
      {{"dy"}, "Reshape", {"sum_gy", "sy"}},
  };
  // Attach the shared "$T" attribute to every node that needs it.
  for (auto &n : nodes) {
    if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
      n.attr = {{"T", "$T"}};
    }
  }
  FunctionDef func = FunctionDefHelper::Define(
      "SubGrad",
      {"x: T", "y: T", "dz: T"},
      {"dx: T", "dy: T"},
      {{"T: {half, float, double}"}},
      nodes);
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SubGrad", item.id);
  EXPECT_EQ(14, item.function_body().node_size());
  ASSERT_EQ(3, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ("y", item.input(1).node_name);
  EXPECT_EQ("dz", item.input(2).node_name);
  ASSERT_EQ(2, item.output_size());
  EXPECT_EQ("dx_RetVal", item.output(0).node_name);
  EXPECT_EQ("dy_RetVal", item.output(1).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "rx" && ++count) {
      EXPECT_EQ("BroadcastGradientArgs", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("sx", node.input(0));
      EXPECT_EQ("sy", node.input(1));
    } else if (node.name() == "sum_gx" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gx", node.input(0));
      EXPECT_EQ("rx", node.input(1));
    } else if (node.name() == "sum_gy" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gy", node.input(0));
      // Second output of the multi-output BroadcastGradientArgs node.
      EXPECT_EQ("rx:1", node.input(1));
    } else if (node.name() == "dx_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dx", node.input(0));
    } else if (node.name() == "dy_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(1, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dy", node.input(0));
    }
  }
  EXPECT_EQ(8, count);
}
// Instantiates a function that calls another function (Swap) and uses control
// dependencies; verifies nested-call inputs and "^node" control edges.
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
      "Swap",
      {"i0: T", "i1: T"},
      {"o0: T", "o1: T"},
      {"T: {float, double}"},
      {{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
       {{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
  FunctionDef func = FunctionDefHelper::Create(
      "ManySwapsFirst",
      {"x: float", "y: float"},
      {"o: float"},
      {},
      {{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
       {{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
       {{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
       {{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
       {{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
      {{"o", "o:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : 1;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "a0" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
      // Control dependency declared in the FunctionDef.
      EXPECT_EQ("^x2", node.input(2));
    } else if (node.name() == "a1" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("a0", node.input(0));
      EXPECT_EQ("a0:1", node.input(1));
    } else if (node.name() == "x2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "y2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^a1", node.input(2));
    } else if (node.name() == "o" && ++count) {
      EXPECT_EQ("Add", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x2", node.input(0));
      EXPECT_EQ("y2", node.input(1));
    }
  }
  EXPECT_EQ(7, count);
}
// Verifies that a function output mapped through the ret map ("out" ->
// "Exp:y:0") produces a correctly wired "out_RetVal" node.
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
  FunctionDef func = FunctionDefHelper::Create(
      "Exp_func",
      {"in: float"},
      {"out: float"},
      {},
      {{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
       {{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
      {{"out", "Exp:y:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("out_RetVal", item.output(0).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "in" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "Linear_func" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("in", node.input(0));
    } else if (node.name() == "Exp" && ++count) {
      EXPECT_EQ("Exp", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("Linear_func", node.input(0));
    } else if (node.name() == "out_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("Exp", node.input(0));
    }
  }
  EXPECT_EQ(4, count);
}
// A function with no inputs must still instantiate: zero input args, one
// output, and the full Const -> Cast -> _Retval chain in the body.
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "GenerateTwo",
      {},
      {"o: T"},
      {"T: {float, double}"},
      {{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
       {{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("o_RetVal", item.output(0).node_name);
  EXPECT_EQ(3, item.function_body().node_size());
  const NodeDef &two = item.function_body().node(0);
  EXPECT_EQ("two", two.name());
  EXPECT_EQ(0, two.input_size());
  const NodeDef &cast = item.function_body().node(1);
  EXPECT_EQ("o", cast.name());
  EXPECT_EQ(1, cast.input_size());
  EXPECT_EQ("two", cast.input(0));
  const NodeDef &retval = item.function_body().node(2);
  EXPECT_EQ("o_RetVal", retval.name());
  EXPECT_EQ(1, retval.input_size());
  EXPECT_EQ("o", retval.input(0));
}
// A function with side-effectful ops (AssignAdd on a ref input) and no
// outputs must disable pruning of stateful ops in the optimization options.
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Define(
      "SideEffects",
      {"x: Ref(float)"},
      {},
      {},
      {{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
       {{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SideEffects", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  const auto &opts = item.optimization_options();
  EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
// Control outputs declared in the FunctionDef must surface both as keep_ops
// and as named ControlOutput entries on the item.
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Create(
      "WithControlOutputs", {"x: Ref(float)"}, {}, {},
      {
          {{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
          {{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
      },
      {}, {{"side_effects", "update"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("WithControlOutputs", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  ASSERT_EQ(1, item.keep_ops.size());
  EXPECT_EQ("update", item.keep_ops[0]);
  ASSERT_EQ(1, item.control_output_size());
  const ControlOutput &ctrl = item.control_outputs()[0];
  EXPECT_EQ("side_effects", ctrl.output_name);
  EXPECT_EQ("update", ctrl.node_name);
}
// Round-trips XTimesTwo through MakeGrapplerFunctionItem -> MakeFunctionDef
// and checks the specialized signature, per-argument attributes, and that
// body node inputs use function-def format ("scale:y:0").
TEST_F(FunctionsTest, MakeFunctionDef) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "XTimesTwo",
      {"x: T"},
      {"y: T"},
      {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
  // Attach an attribute to the first argument to verify it survives the
  // round trip.
  const uint32 arg_index = 0;
  const std::pair<string, string> arg_attr_key_and_value = {"_arg_attr", "abc"};
  FunctionDef::ArgAttrs arg_attr;
  (*arg_attr.mutable_attr())[arg_attr_key_and_value.first].set_s(
      arg_attr_key_and_value.second);
  (*func.mutable_arg_attr())[arg_index] = arg_attr;
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  EXPECT_EQ("x", specialized.signature().input_arg(0).name());
  EXPECT_EQ(DT_FLOAT, specialized.signature().input_arg(0).type());
  EXPECT_EQ("y", specialized.signature().output_arg(0).name());
  EXPECT_EQ(DT_FLOAT, specialized.signature().output_arg(0).type());
  EXPECT_EQ(specialized.arg_attr().size(), 1);
  EXPECT_EQ(specialized.arg_attr().at(arg_index).attr().size(), 1);
  EXPECT_EQ(specialized.arg_attr()
                .at(arg_index)
                .attr()
                .at(arg_attr_key_and_value.first)
                .s(),
            arg_attr_key_and_value.second);
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "scale" && ++count) {
      EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("scale:y:0", node.input(1));
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
    }
  }
  EXPECT_EQ(2, count);
}
// Replaces both inputs of MyMul with Const nodes (note the second call uses
// index 0 again because removal shifts indices) and checks both the mutated
// item graph and the FunctionDef produced from it.
TEST_F(FunctionsTest, ReplaceInputWithConst) {
  FunctionDef func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(2, item.input_size());
  EXPECT_EQ(1, item.output_size());
  ASSERT_EQ(4, item.function_body().node_size());
  const NodeDef &input_x = item.function_body().node(0);
  const NodeDef &input_y = item.function_body().node(1);
  EXPECT_EQ("_Arg", input_x.op());
  EXPECT_EQ("_Arg", input_y.op());
  NodeDef const_input_x;
  const_input_x.set_op("Const");
  AddNodeAttr("Tag", "const_input_x", &const_input_x);
  NodeDef const_input_y;
  const_input_y.set_op("Const");
  AddNodeAttr("Tag", "const_input_y", &const_input_y);
  TF_EXPECT_OK(ReplaceInputWithConst(const_input_x, 0, &item));
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ("Const", input_x.op());
  EXPECT_EQ("const_input_x", input_x.attr().at("Tag").s());
  // After removing input 0, the remaining input "y" is now at index 0.
  TF_EXPECT_OK(ReplaceInputWithConst(const_input_y, 0, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ("Const", input_y.op());
  EXPECT_EQ("const_input_y", input_y.attr().at("Tag").s());
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  EXPECT_EQ(0, specialized.signature().input_arg_size());
  EXPECT_EQ(1, specialized.signature().output_arg_size());
  EXPECT_EQ(3, specialized.node_def_size());
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "x" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("const_input_x", node.attr().at("Tag").s());
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("const_input_y", node.attr().at("Tag").s());
    } else if (node.name() == "output" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("x:output:0", node.input(0));
      EXPECT_EQ("y:output:0", node.input(1));
    }
  }
  EXPECT_EQ(3, count);
}
// Swaps in a hand-built identity body graph and verifies the FunctionDef
// produced afterwards reflects the new body and ret mapping.
TEST_F(FunctionsTest, SwapFunctionBodyAndMakeFunctionDef) {
  using ::tensorflow::test::function::NDef;
  FunctionDef mul_func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  FunctionDef func = FunctionDefHelper::Create(
      "MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  GraphDef id_func_body = test::function::GDef(
      {
          NDef("read_x", "Identity", {"x"}, {{"T", "float"}}),
          NDef("z_RetVal", "_Retval", {"read_x"}, {{"T", "float"}})});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionDefLibrary lib_def;
  *lib_def.add_function() = func;
  *lib_def.add_function() = mul_func;
  FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  item.SwapFunctionBody(std::move(id_func_body));
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "read_x" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ("x", node.input(0));
    }
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ("read_x:output:0", (*specialized.mutable_ret())["z"]);
}
// Round-trips a FunctionDef through GrapplerFunctionItem and back, checking
// that the result is equal to the original — including signature description
// and the control return ("must_execute").
TEST_F(FunctionsTest, FunctionDefGrapplerFunctionItemRoundTrip) {
  FunctionDef func = FunctionDefHelper::Create(
      "DoNothing", {"i: int32"}, {"o: int32"},
      {},
      {
          {{"id"}, "Identity", {"i"}, {{"T", DT_INT32}}},
      },
      {{"o", "id:output:0"}},
      {{"must_execute", "id"}});
  constexpr char description[] = "This is a helpful description.";
  func.mutable_signature()->set_description(description);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_INT32);
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  FunctionDef func2;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &func2));
  EXPECT_TRUE(FunctionDefsEqual(func, func2));
}
}
}
} | const std::vector<InputArgInstantiation>& GrapplerFunctionItem::inputs() const {
return input_args_;
} | TEST_F(FunctionsTest, FromSimpleFunctionDef) {
  // NOTE(review): the stray "} |" on the line above looks like an extraction
  // artifact — confirm against the upstream file.
  // Instantiates XTimesTwo with T=float and checks the resulting item graph
  // node-by-node: one _Arg, the Const/Cast/Mul body, and one _Retval.
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "XTimesTwo",
      {"x: T"},
      {"y: T"},
      {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("XTimesTwo", item.id);
  EXPECT_EQ(5, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("y_RetVal", item.output(0).node_name);
  // Each branch bumps `count` so the final check proves every expected node
  // was visited exactly once.
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "two" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "scale" && ++count) {
      EXPECT_EQ("Cast", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("two", node.input(0));
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("scale", node.input(1));
    } else if (node.name() == "y_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ(0, node.attr().at("index").i());
    }
  }
  EXPECT_EQ(5, count);
}
// Instantiates SubGrad (T=float) and verifies handling of a node with two
// outputs (BroadcastGradientArgs): a consumer of the second output must read
// it as "rx:1" in the instantiated graph.
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
  std::vector<FunctionDefHelper::Node> nodes = {
      {{"sx"}, "Shape", {"x"}},
      {{"sy"}, "Shape", {"y"}},
      {{"gx"}, "Identity", {"dz"}},
      {{"gy"}, "Neg", {"dz"}},
      {{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
      {{"sum_gx"}, "Sum", {"gx", "rx"}},
      {{"dx"}, "Reshape", {"sum_gx", "sx"}},
      {{"sum_gy"}, "Sum", {"gy", "ry"}},
      {{"dy"}, "Reshape", {"sum_gy", "sy"}},
  };
  // Give every node the placeholder T attr except BroadcastGradientArgs,
  // which takes no T.
  for (auto &n : nodes) {
    if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
      n.attr = {{"T", "$T"}};
    }
  }
  FunctionDef func = FunctionDefHelper::Define(
      "SubGrad",
      {"x: T", "y: T", "dz: T"},
      {"dx: T", "dy: T"},
      {{"T: {half, float, double}"}},
      nodes);
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SubGrad", item.id);
  EXPECT_EQ(14, item.function_body().node_size());
  ASSERT_EQ(3, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ("y", item.input(1).node_name);
  EXPECT_EQ("dz", item.input(2).node_name);
  ASSERT_EQ(2, item.output_size());
  EXPECT_EQ("dx_RetVal", item.output(0).node_name);
  EXPECT_EQ("dy_RetVal", item.output(1).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "rx" && ++count) {
      EXPECT_EQ("BroadcastGradientArgs", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("sx", node.input(0));
      EXPECT_EQ("sy", node.input(1));
    } else if (node.name() == "sum_gx" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gx", node.input(0));
      EXPECT_EQ("rx", node.input(1));
    } else if (node.name() == "sum_gy" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gy", node.input(0));
      // Second output of the multi-output node is addressed as "rx:1".
      EXPECT_EQ("rx:1", node.input(1));
    } else if (node.name() == "dx_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dx", node.input(0));
    } else if (node.name() == "dy_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(1, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dy", node.input(0));
    }
  }
  EXPECT_EQ(8, count);
}
// Instantiates a function that calls another function ("Swap") and verifies
// that nested function calls, multi-output addressing ("a0:1"), and control
// dependencies ("^x2", "^a1") are preserved in the item graph.
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
      "Swap",
      {"i0: T", "i1: T"},
      {"o0: T", "o1: T"},
      {"T: {float, double}"},
      {{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
       {{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
  FunctionDef func = FunctionDefHelper::Create(
      "ManySwapsFirst",
      {"x: float", "y: float"},
      {"o: float"},
      {},
      {{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
       {{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
       {{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
       {{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
       {{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
      {{"o", "o:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : 1;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "a0" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^x2", node.input(2));
    } else if (node.name() == "a1" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("a0", node.input(0));
      EXPECT_EQ("a0:1", node.input(1));
    } else if (node.name() == "x2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "y2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^a1", node.input(2));
    } else if (node.name() == "o" && ++count) {
      EXPECT_EQ("Add", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x2", node.input(0));
      EXPECT_EQ("y2", node.input(1));
    }
  }
  EXPECT_EQ(7, count);
}
// Verifies that a function whose output arg is mapped to an interior node's
// output ("out" -> "Exp:y:0") gets a _Retval wired to that node.
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
  FunctionDef func = FunctionDefHelper::Create(
      "Exp_func",
      {"in: float"},
      {"out: float"},
      {},
      {{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
       {{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
      {{"out", "Exp:y:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("out_RetVal", item.output(0).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "in" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "Linear_func" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("in", node.input(0));
    } else if (node.name() == "Exp" && ++count) {
      EXPECT_EQ("Exp", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("Linear_func", node.input(0));
    } else if (node.name() == "out_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("Exp", node.input(0));
    }
  }
  EXPECT_EQ(4, count);
}
// A function with no inputs should produce an item with zero _Arg nodes:
// just the body (Const + Cast) and one _Retval, in that order.
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "GenerateTwo",
      {},
      {"o: T"},
      {"T: {float, double}"},
      {{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
       {{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("o_RetVal", item.output(0).node_name);
  EXPECT_EQ(3, item.function_body().node_size());
  const NodeDef &two = item.function_body().node(0);
  EXPECT_EQ("two", two.name());
  EXPECT_EQ(0, two.input_size());
  const NodeDef &cast = item.function_body().node(1);
  EXPECT_EQ("o", cast.name());
  EXPECT_EQ(1, cast.input_size());
  EXPECT_EQ("two", cast.input(0));
  const NodeDef &retval = item.function_body().node(2);
  EXPECT_EQ("o_RetVal", retval.name());
  EXPECT_EQ(1, retval.input_size());
  EXPECT_EQ("o", retval.input(0));
}
// A function containing a stateful op (AssignAdd on a ref input) must
// disallow pruning of stateful/dataset ops in the item's optimization
// options, even though it has no data outputs.
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Define(
      "SideEffects",
      {"x: Ref(float)"},
      {},
      {},
      {{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
       {{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SideEffects", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  const auto &opts = item.optimization_options();
  EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
// Control returns ("side_effects" -> "update") must surface both as keep_ops
// (so the node is never pruned) and as a ControlOutput on the item.
// NOTE(review): the trailing "} |" on the closing line looks like an
// extraction artifact — confirm against the upstream file.
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Create(
      "WithControlOutputs", {"x: Ref(float)"}, {}, {},
      {
          {{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
          {{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
      },
      {}, {{"side_effects", "update"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("WithControlOutputs", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  ASSERT_EQ(1, item.keep_ops.size());
  EXPECT_EQ("update", item.keep_ops[0]);
  ASSERT_EQ(1, item.control_output_size());
  const ControlOutput &ctrl = item.control_outputs()[0];
  EXPECT_EQ("side_effects", ctrl.output_name);
  EXPECT_EQ("update", ctrl.node_name);
} |
#include <algorithm>
#include <vector>
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
// Takes ownership of `impl`.
Scope::Scope(Impl* impl) : impl_(impl) {}
// Deep-copies the other scope's Impl; the shared state (graph, status,
// name map) stays shared via the Impl's shared_ptr members.
Scope::Scope(const Scope& other) : impl_(new Impl(*other.impl())) {}
Scope::~Scope() {}
Scope& Scope::operator=(const Scope& other) {
  // The copy is constructed before reset() destroys the old Impl, so
  // self-assignment is safe.
  impl_.reset(new Impl(*other.impl_));
  return *this;
}
namespace {
// Separator between nested scope names ("parent/child") and between a base
// name and its uniquifying counter ("add_1").
const char kScopeSeparator[] = "/";
const char kSuffixSeparator[] = "_";
}
// Raw-pointer constructor: the shared_ptr members take ownership of every
// argument.
Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map,
                  ShapeRefiner* refiner, bool disable_shape_inference)
    : graph_(graph),
      status_(status),
      name_map_(name_map),
      refiner_(refiner),
      scope_used_(nullptr),
      colocation_constraints_(),
      disable_shape_inference_(disable_shape_inference) {}
// Shared-ownership constructor; shape inference is disabled iff no refiner
// was supplied.
Scope::Impl::Impl(const std::shared_ptr<Graph>& graph,
                  const std::shared_ptr<Status>& status,
                  const std::shared_ptr<NameMap>& name_map,
                  const std::shared_ptr<ShapeRefiner>& refiner)
    : graph_(graph),
      status_(status),
      name_map_(name_map),
      refiner_(refiner),
      scope_used_(nullptr),
      colocation_constraints_(),
      disable_shape_inference_(refiner_ == nullptr) {}
// Creates a root scope that owns a fresh Graph plus a ShapeRefiner, with
// shape inference enabled.
Scope Scope::NewRootScope() {
  Graph* const graph = new Graph(OpRegistry::Global());
  ShapeRefiner* const refiner =
      new ShapeRefiner(graph->versions(), graph->op_registry());
  Impl* const impl = new Impl(graph, new Status, new Impl::NameMap, refiner,
                              /*disable_shape_inference=*/false);
  return Scope(impl);
}
// Like NewRootScope(), but the returned scope skips shape inference when
// nodes are added.
Scope Scope::DisabledShapeInferenceScope() {
  Graph* const graph = new Graph(OpRegistry::Global());
  ShapeRefiner* const refiner =
      new ShapeRefiner(graph->versions(), graph->op_registry());
  Impl* const impl = new Impl(graph, new Status, new Impl::NameMap, refiner,
                              /*disable_shape_inference=*/true);
  return Scope(impl);
}
// Child-scope constructor for Tags::ScopeName: adopts the new scope name,
// clears the op name, and either shares or restarts the name map.
Scope::Impl::Impl(const Scope& other, Tags::ScopeName, const string& name,
                  bool copy_names)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(copy_names ? other.impl()->name_map_
                           : std::shared_ptr<NameMap>(new NameMap)),
      refiner_(other.impl()->refiner_),
      scope_used_(nullptr),
      control_deps_(other.impl()->control_deps_),
      name_(name),
      op_name_(""),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::OpName: same scope, different op name.
Scope::Impl::Impl(const Scope& other, Tags::OpName, const string& name,
                  const string& op_name)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(name),
      op_name_(op_name),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::ControlDeps.
//
// Unless `clear_control_deps` is set, the child's control dependencies are
// the parent's deps followed by the extra `control_deps` passed in: the
// comma expression in the mem-initializer prepends the parent's deps into
// the by-value parameter. The parameter is then moved (not copied, as
// before) into the member, saving a full copy of the combined list;
// std::move is already used elsewhere in this file.
Scope::Impl::Impl(const Scope& other, Tags::ControlDeps,
                  std::vector<Operation> control_deps, bool clear_control_deps)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(
          clear_control_deps
              ? std::vector<Operation>()
              : (control_deps.insert(control_deps.begin(),
                                     other.impl()->control_deps_.begin(),
                                     other.impl()->control_deps_.end()),
                 std::move(control_deps))),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::Device: overrides only the device.
Scope::Impl::Impl(const Scope& other, Tags::Device, const string& device)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(device),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::SingleUseScope: a scope that may hand
// out exactly one op name (tracked by the freshly-allocated scope_used_).
Scope::Impl::Impl(const Scope& other, Tags::SingleUseScope,
                  const string& op_name)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(new bool(false)),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(op_name),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::ExitOnError: failures become fatal.
Scope::Impl::Impl(const Scope& other, Tags::ExitOnError)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(true),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::KernelLabel: overrides the kernel label.
Scope::Impl::Impl(const Scope& other, Tags::KernelLabel,
                  const string& kernel_label)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(kernel_label),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::Colocate: merges (or clears) colocation
// constraints derived from `colocate_with_op`.
Scope::Impl::Impl(const Scope& other, Tags::Colocate,
                  const Operation& colocate_with_op, bool clear_colocations)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(
          clear_colocations
              ? std::unordered_set<string>()
              : other.impl()->GetColocationConstraints(colocate_with_op)),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::AssignedDevice.
Scope::Impl::Impl(const Scope& other, Tags::AssignedDevice,
                  const string& assigned_device)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(assigned_device),
      xla_cluster_(other.impl()->xla_cluster_),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Child-scope constructor for Tags::XlaCluster.
Scope::Impl::Impl(const Scope& other, Tags::XlaCluster,
                  const string& xla_cluster)
    : graph_(other.impl()->graph_),
      status_(other.impl()->status_),
      name_map_(other.impl()->name_map_),
      refiner_(other.impl()->refiner_),
      scope_used_(other.impl()->scope_used_),
      control_deps_(other.impl()->control_deps_),
      name_(other.impl()->name_),
      op_name_(other.impl()->op_name_),
      exit_on_error_(other.impl()->exit_on_error_),
      kernel_label_(other.impl()->kernel_label_),
      device_(other.impl()->device_),
      assigned_device_(other.impl()->assigned_device_),
      xla_cluster_(xla_cluster),
      colocation_constraints_(other.impl()->colocation_constraints_),
      disable_shape_inference_(other.impl()->disable_shape_inference_) {}
// Returns this scope's colocation constraints merged with those implied by
// `colocate_with_op`: the op's own colocation groups if it carries the
// colocation attr, otherwise the op's node name.
std::unordered_set<string> Scope::Impl::GetColocationConstraints(
    const Operation& colocate_with_op) const {
  std::unordered_set<string> current_constraints(colocation_constraints_);
  const AttrSlice attrs = colocate_with_op.node()->attrs();
  std::vector<string> node_constraints;
  if (TryGetNodeAttr(attrs, kColocationAttrName, &node_constraints)) {
    for (const string& entry : node_constraints) {
      // Strip the group prefix before recording the bare group name.
      StringPiece s(entry);
      if (absl::ConsumePrefix(&s, kColocationGroupPrefix)) {
        current_constraints.emplace(s);
      }
    }
  } else {
    current_constraints.insert(colocate_with_op.node()->name());
  }
  return current_constraints;
}
// Trivial accessors into the shared Impl state.
bool Scope::ok() const { return impl()->status_->ok(); }
Graph* Scope::graph() const { return impl()->graph_.get(); }
std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const {
  return impl()->graph_;
}
Status Scope::status() const { return *impl()->status_; }
const std::vector<Operation>& Scope::control_deps() const {
  return impl()->control_deps_;
}
// Folds `s` into the scope's shared status. In an ExitOnError() scope any
// recorded failure is immediately fatal.
void Scope::UpdateStatus(const Status& s) const {
  impl()->status_->Update(s);
  if (impl()->exit_on_error_ && !ok()) {
    LOG(FATAL) << *impl()->status_;
  }
}
// Serializes the scope's graph into `gdef`; fails fast if the scope already
// carries an error status.
Status Scope::ToGraphDef(GraphDef* gdef, bool include_debug_info) const {
  if (!ok()) {
    return *impl()->status_;
  }
  graph()->ToGraphDef(gdef, true, include_debug_info);
  return absl::OkStatus();
}
// Copies the scope's graph into `g` by round-tripping through a GraphDef;
// any conversion error is folded into the scope's status.
Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const {
  if (ok()) {
    GraphDef graph_def;
    graph()->ToGraphDef(&graph_def);
    UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g));
  }
  return *impl()->status_;
}
// Applies this scope's accumulated properties (control deps, kernel label,
// colocation constraints, device placement, XLA cluster) to a NodeBuilder.
void Scope::UpdateBuilder(NodeBuilder* builder) const {
  std::vector<Node*> control_inputs;
  for (const auto& op : impl()->control_deps_) {
    control_inputs.push_back(op.node());
  }
  builder->ControlInputs(control_inputs);
  if (!impl()->kernel_label_.empty()) {
    builder->Attr("_kernel", impl()->kernel_label_);
  }
  if (!impl()->colocation_constraints_.empty()) {
    // Sort for deterministic attr contents, then re-add the group prefix.
    std::vector<string> constraints(impl()->colocation_constraints_.begin(),
                                    impl()->colocation_constraints_.end());
    std::sort(constraints.begin(), constraints.end());
    std::transform(constraints.begin(), constraints.end(), constraints.begin(),
                   [](const string& s) {
                     return strings::StrCat(kColocationGroupPrefix, s);
                   });
    builder->Attr(kColocationAttrName, constraints);
  }
  if (!impl()->device_.empty()) {
    builder->Device(impl()->device_);
  }
  if (!impl()->assigned_device_.empty()) {
    builder->AssignedDevice(impl()->assigned_device_);
  }
  if (!impl()->xla_cluster_.empty()) {
    builder->XlaCluster(impl()->xla_cluster_);
  }
}
// Returns a name derived from `prefix` that has not been handed out before.
// A single-use scope may produce exactly one name; a second request records
// an AlreadyExists status and returns "".
string Scope::Impl::GetUniqueName(const string& prefix,
                                  bool check_single_use) const {
  if (check_single_use && single_use_scope()) {
    if (*scope_used_) {
      *status_ =
          errors::AlreadyExists(prefix, " already exists in the current scope");
      return "";
    }
    *scope_used_ = true;
    return prefix;
  }
  auto entry = name_map_->find(prefix);
  if (entry == name_map_->end()) {
    // First use of this prefix: record it with counter 0 and use it as-is.
    name_map_->insert({prefix, 0});
    return prefix;
  }
  // Bump the per-prefix counter until "<prefix>_<n>" is itself unused. The
  // counter lives in the map, so later requests continue where we left off.
  string unique_name;
  do {
    unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second);
  } while (name_map_->find(unique_name) != name_map_->end());
  name_map_->insert({unique_name, 0});
  return unique_name;
}
// Builds the fully-qualified op name "<scope>/<unique op name>". The
// separator is omitted when either part is empty (root scope, or a failed
// single-use lookup that yielded "").
string Scope::Impl::GetNameForOp(const string& default_name) const {
  const string unique_name =
      GetUniqueName(default_name, /*check_single_use=*/true);
  if (name_.empty() || unique_name.empty()) {
    return strings::StrCat(name_, unique_name);
  }
  return strings::StrCat(name_, kScopeSeparator, unique_name);
}
// Public entry point for op naming. In a single-use scope the preassigned
// op name is returned exactly once; otherwise the (optional) op name or the
// caller's default is uniquified within the scope.
string Scope::GetUniqueNameForOp(const string& default_name) const {
  if (impl()->single_use_scope()) {
    if (impl()->op_name_.empty() || *impl()->scope_used_) {
      *impl()->status_ =
          errors::InvalidArgument("Cannot get a unique name in this scope");
      return "";
    }
    *impl()->scope_used_ = true;
    return impl()->op_name_;
  }
  return impl()->op_name_.empty() ? impl()->GetNameForOp(default_name)
                                  : impl()->GetNameForOp(impl()->op_name_);
}
// Returns a child scope. An empty name yields a sibling that shares this
// scope's name and name map; otherwise the child name is uniquified against
// this scope and the child starts a fresh name map.
Scope Scope::NewSubScope(const string& child_scope_name) const {
  if (child_scope_name.empty()) {
    return Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->name_,
                          true /* copy_names */));
  }
  const string unique_name =
      impl()->GetUniqueName(child_scope_name, false /* check_single_use */);
  const string sep =
      impl()->name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
  return Scope(new Impl(*this, Impl::Tags::ScopeName(),
                        strings::StrCat(impl()->name_, sep, unique_name),
                        false /* copy_names */));
}
// Returns a scope whose next op will be named `op_name`. Not allowed on
// single-use scopes, whose op name is fixed.
Scope Scope::WithOpNameImpl(const string& op_name) const {
  if (impl()->single_use_scope()) {
    UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name,
                                         " on this scope"));
    return *this;
  }
  return Scope(new Impl(*this, Impl::Tags::OpName(), impl()->name_, op_name));
}
// Returns a scope whose ops additionally depend on every operation in
// `control_deps` (appended to the current scope's deps by the Impl ctor).
Scope Scope::WithControlDependencies(
    const absl::Span<const Operation> control_deps) const {
  std::vector<Operation> deps(control_deps.begin(), control_deps.end());
  return Scope(new Impl(*this, Impl::Tags::ControlDeps(), std::move(deps),
                        /*clear_control_deps=*/false));
}
// Convenience overload: depend on the producer of a single output.
Scope Scope::WithControlDependencies(const Output& control_dep) const {
  std::vector<Operation> deps{control_dep.op()};
  return Scope(new Impl(*this, Impl::Tags::ControlDeps(), std::move(deps),
                        /*clear_control_deps=*/false));
}
// Returns a scope with the inherited control dependencies dropped.
Scope Scope::WithNoControlDependencies() const {
  std::vector<Operation> no_deps;
  return Scope(new Impl(*this, Impl::Tags::ControlDeps(), std::move(no_deps),
                        /*clear_control_deps=*/true));
}
// Each wrapper below returns a child scope identical to this one except for
// the single tagged property.
Scope Scope::WithDevice(const string& device) const {
  return Scope(new Impl(*this, Impl::Tags::Device(), device));
}
Scope Scope::WithAssignedDevice(const string& assigned_device) const {
  return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device));
}
Scope Scope::WithXlaCluster(const string& xla_cluster) const {
  return Scope(new Impl(*this, Impl::Tags::XlaCluster(), xla_cluster));
}
// Adds `op`'s colocation groups (or its name) to the constraint set.
Scope Scope::ColocateWith(const Operation& op) const {
  return Scope(new Impl(*this, Impl::Tags::Colocate(), op,
                        false /* clear_colocations */));
}
// Drops all inherited colocation constraints; the Operation() is unused.
Scope Scope::ClearColocation() const {
  return Scope(new Impl(*this, Impl::Tags::Colocate(), Operation(),
                        true /* clear_colocations */));
}
Scope Scope::ExitOnError() const {
  return Scope(new Impl(*this, Impl::Tags::ExitOnError()));
}
Scope Scope::WithKernelLabel(const string& kernel_label) const {
  return Scope(new Impl(*this, Impl::Tags::KernelLabel(), kernel_label));
}
// Returns the pair of scopes used to build a composite op: `child` for the
// ops inside the composite, and a single-use `last` scope that names the
// final op after the composite itself.
CompositeOpScopes Scope::GetCompositeOpScopes(
    const string& composite_op_name) const {
  if (impl()->op_name_.empty() && composite_op_name.empty()) {
    UpdateStatus(errors::InvalidArgument(
        "Cannot create composite op scopes with empty name"));
    return {*this, *this};
  }
  if (!impl()->single_use_scope()) {
    Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name
                                                       : impl()->op_name_);
    const string child_op_sep = impl()->name_.empty() ? "" : kSuffixSeparator;
    const string child_name =
        strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_);
    return {child,
            Scope(new Impl(child, Impl::Tags::SingleUseScope(), child_name))};
  } else {
    // Already single-use: reuse the fixed op name for the inner scope.
    return {Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->op_name_,
                           true /* copy_names */)),
            *this};
  }
}
// Runs shape inference for `node` unless this scope was built with shape
// inference disabled.
Status Scope::DoShapeInference(Node* node) const {
  if (impl_->disable_shape_inference_) return absl::OkStatus();
  return impl_->refiner_->AddNode(node);
}
// Builds a Scope over an existing graph WITHOUT taking ownership: the
// shared_ptr deleters are no-ops. The name map is pre-seeded from the
// graph's current node names so new ops never collide with existing ones.
class InternalScope {
 public:
  static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
    Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap;
    for (const Node* node : graph->nodes()) {
      const string& name = node->name();
      (*name_map)[name] = 0;
      // Also reserve every scope prefix of the name ("a/b/c" -> "a", "a/b").
      // idx starts at size_t(-1) so the first find begins at position 0.
      size_t idx = -1;
      while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) {
        (*name_map)[name.substr(0, idx)] = 0;
      }
    }
    return Scope(new Scope::Impl(
        std::shared_ptr<Graph>(graph, [](Graph*) {}),
        std::shared_ptr<Status>(status, [](Status*) {}),
        std::shared_ptr<Scope::Impl::NameMap>(name_map),
        std::shared_ptr<ShapeRefiner>(refiner, [](ShapeRefiner*) {})));
  }
};
// Public shim over InternalScope::NewScope (which needs friend access to
// Scope::Impl).
Scope NewInternalScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
  return InternalScope::NewScope(graph, status, refiner);
}
// Builds a single node of type `op_name` with the given inputs under
// `scope`, returning its first output in `*output`. The scope status is
// rechecked after every step so the first failure is reported.
Status CreateOutputWithScope(string op_name,
                             absl::Span<const ::tensorflow::Input> inputs,
                             const Scope& scope, Output* output) {
  TF_RETURN_IF_ERROR(scope.status());
  const auto unique_name = scope.GetUniqueNameForOp(op_name);
  auto builder = ::tensorflow::NodeBuilder(unique_name, op_name);
  for (const auto& input : inputs) {
    TF_RETURN_IF_ERROR(scope.status());
    builder = builder.Input(input.node());
  }
  ::tensorflow::Node* ret;
  scope.UpdateBuilder(&builder);
  TF_RETURN_IF_ERROR(scope.status());
  scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
  TF_RETURN_IF_ERROR(scope.status());
  *output = Output(ret, 0);
  return absl::OkStatus();
}
} | #include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Requests for an already-used op name must be uniquified with "_<n>"
// suffixes.
TEST(ScopeTest, BasicNames) {
  Scope root = Scope::NewRootScope();
  EXPECT_EQ(root.GetUniqueNameForOp("add"), "add");
  EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_1");
  EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_2");
  EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul");
}
// Uniquified names can themselves collide with later explicit requests;
// the suffixing must keep every returned name distinct.
TEST(ScopeTest, OpAndScopeNameCollision) {
  Scope root = Scope::NewRootScope();
  EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo");
  EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1");
  EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1");
  EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2");
  EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3");
  EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1");
}
// Sub-scopes prefix op names with "<scope>/"; sibling scopes with the same
// name are uniquified as well.
TEST(ScopeTest, HierarchicalNames) {
  Scope root = Scope::NewRootScope();
  Scope child = root.NewSubScope("child");
  EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
  EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add_1");
  EXPECT_EQ(child.GetUniqueNameForOp("mul"), "child/mul");
  Scope child_1 = root.NewSubScope("child");
  EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add");
  EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add_1");
  EXPECT_EQ(child_1.GetUniqueNameForOp("mul"), "child_1/mul");
  Scope c_c = root.NewSubScope("c").NewSubScope("c");
  EXPECT_EQ(c_c.GetUniqueNameForOp("add"), "c/c/add");
  Scope c_1 = root.NewSubScope("c");
  Scope c_1_c = c_1.NewSubScope("c");
  EXPECT_EQ(c_1_c.GetUniqueNameForOp("add"), "c_1/c/add");
  Scope c_1_c_1 = c_1.NewSubScope("c");
  EXPECT_EQ(c_1_c_1.GetUniqueNameForOp("add"), "c_1/c_1/add");
  // An empty sub-scope name adds no prefix but still shares the parent's
  // name pool.
  EXPECT_EQ(root.NewSubScope("").NewSubScope("").GetUniqueNameForOp("d"), "d");
  EXPECT_EQ(root.NewSubScope("").GetUniqueNameForOp("d"), "d_1");
  EXPECT_EQ(root.GetUniqueNameForOp("d"), "d_2");
}
// Scope names and op names draw from the same uniquification pool.
TEST(ScopeTest, ScopeAndOpNames) {
  Scope root = Scope::NewRootScope();
  Scope child = root.NewSubScope("child");
  EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
  EXPECT_EQ(root.GetUniqueNameForOp("child"), "child_1");
  EXPECT_EQ(root.NewSubScope("child").GetUniqueNameForOp("p"), "child_2/p");
}
namespace {
// Helpers that emulate composite ops built via GetCompositeOpScopes():
// `child` hosts the inner ops, `last` names the composite's final op.
string LastOp(const Scope& scope) { return scope.GetUniqueNameForOp("Last"); }
std::vector<string> AnotherCompositeOp(const Scope& scope) {
  auto cop_scopes = scope.GetCompositeOpScopes("another_cop");
  const string c1 = cop_scopes.child.GetUniqueNameForOp("c1");
  const string c2 = cop_scopes.child.GetUniqueNameForOp("mul");
  return {c1, c2, LastOp(cop_scopes.last)};
}
std::vector<string> LinearOp(const Scope& scope) {
  auto cop_scopes = scope.GetCompositeOpScopes("linear");
  Scope linear = cop_scopes.child;
  const string mul_op_name = linear.GetUniqueNameForOp("mul");
  const string bias_add_op_name = linear.GetUniqueNameForOp("bias_add");
  auto cop_names = AnotherCompositeOp(cop_scopes.last);
  return {mul_op_name, bias_add_op_name, cop_names[0], cop_names[1],
          cop_names[2]};
}
}
// Inner ops of a composite op are named under the composite's scope while
// the "last" op takes the composite's own (possibly overridden) name.
TEST(ScopeTest, CompositeOp) {
  Scope root = Scope::NewRootScope();
  const auto names1 = LinearOp(root);
  EXPECT_EQ(names1[0], "linear/mul");
  EXPECT_EQ(names1[1], "linear/bias_add");
  EXPECT_EQ(names1[2], "linear/c1");
  EXPECT_EQ(names1[3], "linear/mul_1");
  EXPECT_EQ(names1[4], "linear");
  EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1");
  const auto names2 = LinearOp(root);
  EXPECT_EQ(names2[0], "linear_2/mul");
  EXPECT_EQ(names2[1], "linear_2/bias_add");
  EXPECT_EQ(names2[2], "linear_2/c1");
  EXPECT_EQ(names2[3], "linear_2/mul_1");
  EXPECT_EQ(names2[4], "linear_2");
  const auto names3 = LinearOp(root.WithOpName("c"));
  EXPECT_EQ(names3[0], "c/mul");
  EXPECT_EQ(names3[1], "c/bias_add");
  EXPECT_EQ(names3[2], "c/c1");
  EXPECT_EQ(names3[3], "c/mul_1");
  EXPECT_EQ(names3[4], "c");
}
// The "last" scope of a composite op may be used exactly once; a second
// use must leave the scope in an error state.
TEST(ScopeTest, SingleUseScope) {
  Scope root = Scope::NewRootScope();
  auto cop_scopes = root.GetCompositeOpScopes("cop");
  EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop");
  cop_scopes.last.GetUniqueNameForOp("foo");
  EXPECT_FALSE(cop_scopes.last.ok());
}
// Control dependencies accumulate across nested WithControlDependencies().
TEST(ScopeTest, ControlDeps) {
  Scope root = Scope::NewRootScope();
  auto c1 = Operation();
  auto c2 = Operation();
  Scope c = root.WithControlDependencies({c1, c2});
  EXPECT_EQ(c.control_deps().size(), 2);
  Scope c_c = c.WithControlDependencies({Operation()});
  EXPECT_EQ(c_c.control_deps().size(), 3);
}
// CreateOutputWithScope builds a node honoring the scope's op name.
TEST(ScopeTest, CreateOutput) {
  Scope root = Scope::NewRootScope();
  Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
  Output add;
  ASSERT_TRUE(
      CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok());
  EXPECT_EQ(add.node()->name(), "add");
  EXPECT_EQ(add.node()->type_string(), "Add");
}
} | Scope Scope::WithOpNameImpl(const string& op_name) const {
if (impl()->single_use_scope()) {
UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name,
" on this scope"));
return *this;
} | TEST(ScopeTest, CompositeOp) {
Scope root = Scope::NewRootScope();
const auto names1 = LinearOp(root);
EXPECT_EQ(names1[0], "linear/mul");
EXPECT_EQ(names1[1], "linear/bias_add");
EXPECT_EQ(names1[2], "linear/c1");
EXPECT_EQ(names1[3], "linear/mul_1");
EXPECT_EQ(names1[4], "linear");
EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1");
const auto names2 = LinearOp(root);
EXPECT_EQ(names2[0], "linear_2/mul");
EXPECT_EQ(names2[1], "linear_2/bias_add");
EXPECT_EQ(names2[2], "linear_2/c1");
EXPECT_EQ(names2[3], "linear_2/mul_1");
EXPECT_EQ(names2[4], "linear_2");
const auto names3 = LinearOp(root.WithOpName("c"));
EXPECT_EQ(names3[0], "c/mul");
EXPECT_EQ(names3[1], "c/bias_add");
EXPECT_EQ(names3[2], "c/c1");
EXPECT_EQ(names3[3], "c/mul_1");
EXPECT_EQ(names3[4], "c");
}
TEST(ScopeTest, SingleUseScope) {
Scope root = Scope::NewRootScope();
auto cop_scopes = root.GetCompositeOpScopes("cop");
EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop");
cop_scopes.last.GetUniqueNameForOp("foo");
EXPECT_FALSE(cop_scopes.last.ok());
} |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_TYPE_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_TYPE_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/memory.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeType;
class TypeTypeView;
namespace common_internal {
struct TypeTypeData;
}
// `TypeType` is the CEL type `type` — the type of types themselves.  It may
// carry a type parameter (see the two-argument constructor), held behind the
// reference-counted `data_` handle, so copies are cheap.
class TypeType final {
 public:
  using view_alternative_type = TypeTypeView;
  static constexpr TypeKind kKind = TypeKind::kType;
  static constexpr absl::string_view kName = "type";
  // Rehydrates an owning TypeType from a non-owning TypeTypeView.
  explicit TypeType(TypeTypeView type);
  // Constructs a parameterized `type` (e.g. the type of `int`), allocating
  // through `memory_manager`.
  TypeType(MemoryManagerRef memory_manager, Type parameter);
  TypeType() = default;
  TypeType(const TypeType&) = default;
  TypeType(TypeType&&) = default;
  TypeType& operator=(const TypeType&) = default;
  TypeType& operator=(TypeType&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return kName;
  }
  // Returns the type parameters (defined out of line; presumably empty for a
  // default-constructed TypeType — confirm against the implementation).
  absl::Span<const Type> parameters() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
  std::string DebugString() const { return std::string(name()); }
  constexpr void swap(TypeType& other) noexcept {
    using std::swap;
    swap(data_, other.data_);
  }
  // NOTE(review): `data_` is publicly accessible as written; presumably only
  // intended for use by TypeTypeView and internals.
  Shared<const common_internal::TypeTypeData> data_;
};
// swap / equality / hashing / streaming support for TypeType.  Equality and
// hashing ignore the parameter payload: all TypeType instances compare equal
// and hash identically.
inline constexpr void swap(TypeType& lhs, TypeType& rhs) noexcept {
  lhs.swap(rhs);
}
inline constexpr bool operator==(const TypeType&, const TypeType&) {
  return true;
}
inline constexpr bool operator!=(const TypeType& lhs, const TypeType& rhs) {
  return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H state, TypeType) {
  // Stateless for hashing purposes: nothing is combined.
  return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const TypeType& type) {
  return out << type.DebugString();
}
// `TypeTypeView` is the non-owning view counterpart of `TypeType`.  It holds
// a `SharedView` of the same underlying data and must not outlive the
// `TypeType` it was created from.
class TypeTypeView final {
 public:
  using alternative_type = TypeType;
  static constexpr TypeKind kKind = TypeType::kKind;
  static constexpr absl::string_view kName = TypeType::kName;
  // Implicit view of an owning TypeType; the TypeType must stay alive.
  TypeTypeView(const TypeType& type ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
      : data_(type.data_) {}
  TypeTypeView& operator=(const TypeType& type ABSL_ATTRIBUTE_LIFETIME_BOUND
                              ABSL_ATTRIBUTE_UNUSED) {
    data_ = type.data_;
    return *this;
  }
  // Binding a view to a temporary would dangle immediately.
  TypeTypeView& operator=(TypeType&&) = delete;
  TypeTypeView() = default;
  TypeTypeView(const TypeTypeView&) = default;
  TypeTypeView(TypeTypeView&&) = default;
  TypeTypeView& operator=(const TypeTypeView&) = default;
  TypeTypeView& operator=(TypeTypeView&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const { return kName; }
  // Returns the viewed type parameters (defined out of line).
  absl::Span<const Type> parameters() const;
  std::string DebugString() const { return std::string(name()); }
  constexpr void swap(TypeTypeView& other) noexcept {
    using std::swap;
    swap(data_, other.data_);
  }
  // Non-owning handle to the underlying TypeTypeData.
  SharedView<const common_internal::TypeTypeData> data_;
};
// swap / equality / hashing / streaming support for TypeTypeView, mirroring
// the TypeType overloads: all views compare equal and hash identically.
inline constexpr void swap(TypeTypeView& lhs, TypeTypeView& rhs) noexcept {
  lhs.swap(rhs);
}
inline constexpr bool operator==(TypeTypeView, TypeTypeView) { return true; }
inline constexpr bool operator!=(TypeTypeView lhs, TypeTypeView rhs) {
  return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H state, TypeTypeView) {
  // Nothing to combine: the view is stateless for hashing purposes.
  return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, TypeTypeView type) {
  return out << type.DebugString();
}
// Owning TypeType adopts the data referenced by the view.
inline TypeType::TypeType(TypeTypeView type) : data_(type.data_) {}
}
#endif | #include "common/type.h"
#include <sstream>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
// TypeType is stateless for observation purposes: kind, name, debug output,
// hash and equality are all fixed constants.
TEST(TypeType, Kind) {
  EXPECT_EQ(TypeType().kind(), TypeType::kKind);
  EXPECT_EQ(Type(TypeType()).kind(), TypeType::kKind);
}
TEST(TypeType, Name) {
  EXPECT_EQ(TypeType().name(), TypeType::kName);
  EXPECT_EQ(Type(TypeType()).name(), TypeType::kName);
}
TEST(TypeType, DebugString) {
  {
    std::ostringstream out;
    out << TypeType();
    EXPECT_EQ(out.str(), TypeType::kName);
  }
  {
    std::ostringstream out;
    out << Type(TypeType());
    EXPECT_EQ(out.str(), TypeType::kName);
  }
}
TEST(TypeType, Hash) {
  EXPECT_EQ(absl::HashOf(TypeType()), absl::HashOf(TypeType()));
}
TEST(TypeType, Equal) {
  EXPECT_EQ(TypeType(), TypeType());
  EXPECT_EQ(Type(TypeType()), TypeType());
  EXPECT_EQ(TypeType(), Type(TypeType()));
  EXPECT_EQ(Type(TypeType()), Type(TypeType()));
}
TEST(TypeType, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(TypeType()), NativeTypeId::For<TypeType>());
  EXPECT_EQ(NativeTypeId::Of(Type(TypeType())), NativeTypeId::For<TypeType>());
}
TEST(TypeType, InstanceOf) {
  EXPECT_TRUE(InstanceOf<TypeType>(TypeType()));
  EXPECT_TRUE(InstanceOf<TypeType>(Type(TypeType())));
}
TEST(TypeType, Cast) {
  EXPECT_THAT(Cast<TypeType>(TypeType()), An<TypeType>());
  EXPECT_THAT(Cast<TypeType>(Type(TypeType())), An<TypeType>());
}
TEST(TypeType, As) {
  EXPECT_THAT(As<TypeType>(TypeType()), Ne(absl::nullopt));
  EXPECT_THAT(As<TypeType>(Type(TypeType())), Ne(absl::nullopt));
}
// Mirror coverage for the non-owning TypeTypeView, including interop
// (equality / hashing) against the owning TypeType.
TEST(TypeTypeView, Kind) {
  EXPECT_EQ(TypeTypeView().kind(), TypeTypeView::kKind);
  EXPECT_EQ(TypeView(TypeTypeView()).kind(), TypeTypeView::kKind);
}
TEST(TypeTypeView, Name) {
  EXPECT_EQ(TypeTypeView().name(), TypeTypeView::kName);
  EXPECT_EQ(TypeView(TypeTypeView()).name(), TypeTypeView::kName);
}
TEST(TypeTypeView, DebugString) {
  {
    std::ostringstream out;
    out << TypeTypeView();
    EXPECT_EQ(out.str(), TypeTypeView::kName);
  }
  {
    std::ostringstream out;
    out << TypeView(TypeTypeView());
    EXPECT_EQ(out.str(), TypeTypeView::kName);
  }
}
TEST(TypeTypeView, Hash) {
  EXPECT_EQ(absl::HashOf(TypeTypeView()), absl::HashOf(TypeTypeView()));
  EXPECT_EQ(absl::HashOf(TypeTypeView()), absl::HashOf(TypeType()));
}
TEST(TypeTypeView, Equal) {
  EXPECT_EQ(TypeTypeView(), TypeTypeView());
  EXPECT_EQ(TypeView(TypeTypeView()), TypeTypeView());
  EXPECT_EQ(TypeTypeView(), TypeView(TypeTypeView()));
  EXPECT_EQ(TypeView(TypeTypeView()), TypeView(TypeTypeView()));
  EXPECT_EQ(TypeTypeView(), TypeType());
  EXPECT_EQ(TypeView(TypeTypeView()), TypeType());
  EXPECT_EQ(TypeView(TypeTypeView()), Type(TypeType()));
  EXPECT_EQ(TypeType(), TypeTypeView());
  EXPECT_EQ(TypeType(), TypeTypeView());
  EXPECT_EQ(TypeType(), TypeView(TypeTypeView()));
  EXPECT_EQ(Type(TypeType()), TypeView(TypeTypeView()));
  EXPECT_EQ(TypeTypeView(), TypeType());
}
TEST(TypeTypeView, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(TypeTypeView()),
            NativeTypeId::For<TypeTypeView>());
  EXPECT_EQ(NativeTypeId::Of(TypeView(TypeTypeView())),
            NativeTypeId::For<TypeTypeView>());
}
TEST(TypeTypeView, InstanceOf) {
  EXPECT_TRUE(InstanceOf<TypeTypeView>(TypeTypeView()));
  EXPECT_TRUE(InstanceOf<TypeTypeView>(TypeView(TypeTypeView())));
}
TEST(TypeTypeView, Cast) {
  EXPECT_THAT(Cast<TypeTypeView>(TypeTypeView()), An<TypeTypeView>());
  EXPECT_THAT(Cast<TypeTypeView>(TypeView(TypeTypeView())), An<TypeTypeView>());
}
TEST(TypeTypeView, As) {
  EXPECT_THAT(As<TypeTypeView>(TypeTypeView()), Ne(absl::nullopt));
  EXPECT_THAT(As<TypeTypeView>(TypeView(TypeTypeView())), Ne(absl::nullopt));
}
}
} | inline std::ostream& operator<<(std::ostream& out, const TypeType& type) {
return out << type.DebugString();
} | TEST(TypeType, DebugString) {
{
std::ostringstream out;
out << TypeType();
EXPECT_EQ(out.str(), TypeType::kName);
}
{
std::ostringstream out;
out << Type(TypeType());
EXPECT_EQ(out.str(), TypeType::kName);
}
}
TEST(TypeTypeView, DebugString) {
{
std::ostringstream out;
out << TypeTypeView();
EXPECT_EQ(out.str(), TypeTypeView::kName);
}
{
std::ostringstream out;
out << TypeView(TypeTypeView());
EXPECT_EQ(out.str(), TypeTypeView::kName);
}
} |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_BOOL_WRAPPER_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_BOOL_WRAPPER_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class BoolWrapperType;
class BoolWrapperTypeView;
// `BoolWrapperType` models the protobuf well-known wrapper type
// `google.protobuf.BoolValue`.  The type is completely stateless: every
// instance is interchangeable, and copy/assign/swap are no-ops.
class BoolWrapperType final {
 public:
  using view_alternative_type = BoolWrapperTypeView;
  static constexpr TypeKind kKind = TypeKind::kBoolWrapper;
  static constexpr absl::string_view kName = "google.protobuf.BoolValue";
  // Conversion from the (equally stateless) view type.
  explicit BoolWrapperType(BoolWrapperTypeView);
  BoolWrapperType() = default;
  BoolWrapperType(const BoolWrapperType&) = default;
  BoolWrapperType(BoolWrapperType&&) = default;
  BoolWrapperType& operator=(const BoolWrapperType&) = default;
  BoolWrapperType& operator=(BoolWrapperType&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return kName;
  }
  // Wrapper types are not parameterized, so this is always empty.
  absl::Span<const Type> parameters() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {};
  }
  std::string DebugString() const { return std::string(name()); }
  // No state to exchange.
  constexpr void swap(BoolWrapperType&) noexcept {}
};
// swap / equality / hashing / streaming support for BoolWrapperType.  Since
// the type is stateless, equality is always true and hashing adds nothing.
inline constexpr void swap(BoolWrapperType& lhs,
                           BoolWrapperType& rhs) noexcept {
  lhs.swap(rhs);
}
inline constexpr bool operator==(BoolWrapperType, BoolWrapperType) {
  return true;
}
inline constexpr bool operator!=(BoolWrapperType lhs, BoolWrapperType rhs) {
  return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H state, BoolWrapperType) {
  return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out,
                                const BoolWrapperType& type) {
  return out << type.DebugString();
}
// Non-owning view counterpart of `BoolWrapperType`.  Also stateless; the
// view exists purely for API symmetry with the other type/view pairs.
class BoolWrapperTypeView final {
 public:
  using alternative_type = BoolWrapperType;
  static constexpr TypeKind kKind = BoolWrapperType::kKind;
  static constexpr absl::string_view kName = BoolWrapperType::kName;
  // Implicit view of an owning BoolWrapperType; no state is captured.
  BoolWrapperTypeView(const BoolWrapperType& type ABSL_ATTRIBUTE_LIFETIME_BOUND
                          ABSL_ATTRIBUTE_UNUSED) noexcept {}
  BoolWrapperTypeView& operator=(
      const BoolWrapperType& type ABSL_ATTRIBUTE_LIFETIME_BOUND
          ABSL_ATTRIBUTE_UNUSED) {
    return *this;
  }
  // Binding a view to a temporary is disallowed for consistency with the
  // stateful view types.
  BoolWrapperTypeView& operator=(BoolWrapperType&&) = delete;
  BoolWrapperTypeView() = default;
  BoolWrapperTypeView(const BoolWrapperTypeView&) = default;
  BoolWrapperTypeView(BoolWrapperTypeView&&) = default;
  BoolWrapperTypeView& operator=(const BoolWrapperTypeView&) = default;
  BoolWrapperTypeView& operator=(BoolWrapperTypeView&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  constexpr absl::string_view name() const { return kName; }
  // Wrapper types carry no type parameters.
  absl::Span<const Type> parameters() const { return {}; }
  std::string DebugString() const { return std::string(name()); }
  constexpr void swap(BoolWrapperTypeView&) noexcept {}
};
// swap / equality / hashing / streaming support for BoolWrapperTypeView,
// mirroring the owning type's overloads.
inline constexpr void swap(BoolWrapperTypeView& lhs,
                           BoolWrapperTypeView& rhs) noexcept {
  lhs.swap(rhs);
}
inline constexpr bool operator==(BoolWrapperTypeView, BoolWrapperTypeView) {
  return true;
}
inline constexpr bool operator!=(BoolWrapperTypeView lhs,
                                 BoolWrapperTypeView rhs) {
  return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H state, BoolWrapperTypeView) {
  return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, BoolWrapperTypeView type) {
  return out << type.DebugString();
}
// Stateless conversion from view to owning type.
inline BoolWrapperType::BoolWrapperType(BoolWrapperTypeView) {}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
// BoolWrapperType is stateless: kind, name, debug output, hash and equality
// are all fixed constants.
TEST(BoolWrapperType, Kind) {
  EXPECT_EQ(BoolWrapperType().kind(), BoolWrapperType::kKind);
  EXPECT_EQ(Type(BoolWrapperType()).kind(), BoolWrapperType::kKind);
}
TEST(BoolWrapperType, Name) {
  EXPECT_EQ(BoolWrapperType().name(), BoolWrapperType::kName);
  EXPECT_EQ(Type(BoolWrapperType()).name(), BoolWrapperType::kName);
}
TEST(BoolWrapperType, DebugString) {
  {
    std::ostringstream out;
    out << BoolWrapperType();
    EXPECT_EQ(out.str(), BoolWrapperType::kName);
  }
  {
    std::ostringstream out;
    out << Type(BoolWrapperType());
    EXPECT_EQ(out.str(), BoolWrapperType::kName);
  }
}
TEST(BoolWrapperType, Hash) {
  EXPECT_EQ(absl::HashOf(BoolWrapperType()), absl::HashOf(BoolWrapperType()));
}
TEST(BoolWrapperType, Equal) {
  EXPECT_EQ(BoolWrapperType(), BoolWrapperType());
  EXPECT_EQ(Type(BoolWrapperType()), BoolWrapperType());
  EXPECT_EQ(BoolWrapperType(), Type(BoolWrapperType()));
  EXPECT_EQ(Type(BoolWrapperType()), Type(BoolWrapperType()));
}
TEST(BoolWrapperType, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(BoolWrapperType()),
            NativeTypeId::For<BoolWrapperType>());
  EXPECT_EQ(NativeTypeId::Of(Type(BoolWrapperType())),
            NativeTypeId::For<BoolWrapperType>());
}
TEST(BoolWrapperType, InstanceOf) {
  EXPECT_TRUE(InstanceOf<BoolWrapperType>(BoolWrapperType()));
  EXPECT_TRUE(InstanceOf<BoolWrapperType>(Type(BoolWrapperType())));
}
TEST(BoolWrapperType, Cast) {
  EXPECT_THAT(Cast<BoolWrapperType>(BoolWrapperType()), An<BoolWrapperType>());
  EXPECT_THAT(Cast<BoolWrapperType>(Type(BoolWrapperType())),
              An<BoolWrapperType>());
}
TEST(BoolWrapperType, As) {
  EXPECT_THAT(As<BoolWrapperType>(BoolWrapperType()), Ne(absl::nullopt));
  EXPECT_THAT(As<BoolWrapperType>(Type(BoolWrapperType())), Ne(absl::nullopt));
}
// Mirror coverage for the non-owning BoolWrapperTypeView, including interop
// (equality / hashing) against the owning type.
TEST(BoolWrapperTypeView, Kind) {
  EXPECT_EQ(BoolWrapperTypeView().kind(), BoolWrapperTypeView::kKind);
  EXPECT_EQ(TypeView(BoolWrapperTypeView()).kind(), BoolWrapperTypeView::kKind);
}
TEST(BoolWrapperTypeView, Name) {
  EXPECT_EQ(BoolWrapperTypeView().name(), BoolWrapperTypeView::kName);
  EXPECT_EQ(TypeView(BoolWrapperTypeView()).name(), BoolWrapperTypeView::kName);
}
TEST(BoolWrapperTypeView, DebugString) {
  {
    std::ostringstream out;
    out << BoolWrapperTypeView();
    EXPECT_EQ(out.str(), BoolWrapperTypeView::kName);
  }
  {
    std::ostringstream out;
    out << TypeView(BoolWrapperTypeView());
    EXPECT_EQ(out.str(), BoolWrapperTypeView::kName);
  }
}
TEST(BoolWrapperTypeView, Hash) {
  EXPECT_EQ(absl::HashOf(BoolWrapperTypeView()),
            absl::HashOf(BoolWrapperTypeView()));
  EXPECT_EQ(absl::HashOf(BoolWrapperTypeView()),
            absl::HashOf(BoolWrapperType()));
}
TEST(BoolWrapperTypeView, Equal) {
  EXPECT_EQ(BoolWrapperTypeView(), BoolWrapperTypeView());
  EXPECT_EQ(TypeView(BoolWrapperTypeView()), BoolWrapperTypeView());
  EXPECT_EQ(BoolWrapperTypeView(), TypeView(BoolWrapperTypeView()));
  EXPECT_EQ(TypeView(BoolWrapperTypeView()), TypeView(BoolWrapperTypeView()));
  EXPECT_EQ(BoolWrapperTypeView(), BoolWrapperType());
  EXPECT_EQ(TypeView(BoolWrapperTypeView()), BoolWrapperType());
  EXPECT_EQ(TypeView(BoolWrapperTypeView()), Type(BoolWrapperType()));
  EXPECT_EQ(BoolWrapperType(), BoolWrapperTypeView());
  EXPECT_EQ(BoolWrapperType(), BoolWrapperTypeView());
  EXPECT_EQ(BoolWrapperType(), TypeView(BoolWrapperTypeView()));
  EXPECT_EQ(Type(BoolWrapperType()), TypeView(BoolWrapperTypeView()));
  EXPECT_EQ(BoolWrapperTypeView(), BoolWrapperType());
}
TEST(BoolWrapperTypeView, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(BoolWrapperTypeView()),
            NativeTypeId::For<BoolWrapperTypeView>());
  EXPECT_EQ(NativeTypeId::Of(TypeView(BoolWrapperTypeView())),
            NativeTypeId::For<BoolWrapperTypeView>());
}
TEST(BoolWrapperTypeView, InstanceOf) {
  EXPECT_TRUE(InstanceOf<BoolWrapperTypeView>(BoolWrapperTypeView()));
  EXPECT_TRUE(InstanceOf<BoolWrapperTypeView>(TypeView(BoolWrapperTypeView())));
}
TEST(BoolWrapperTypeView, Cast) {
  EXPECT_THAT(Cast<BoolWrapperTypeView>(BoolWrapperTypeView()),
              An<BoolWrapperTypeView>());
  EXPECT_THAT(Cast<BoolWrapperTypeView>(TypeView(BoolWrapperTypeView())),
              An<BoolWrapperTypeView>());
}
TEST(BoolWrapperTypeView, As) {
  EXPECT_THAT(As<BoolWrapperTypeView>(BoolWrapperTypeView()),
              Ne(absl::nullopt));
  EXPECT_THAT(As<BoolWrapperTypeView>(TypeView(BoolWrapperTypeView())),
              Ne(absl::nullopt));
}
}
} | inline constexpr bool operator!=(BoolWrapperType lhs, BoolWrapperType rhs) {
return !operator==(lhs, rhs);
} | TEST(BoolWrapperType, Equal) {
EXPECT_EQ(BoolWrapperType(), BoolWrapperType());
EXPECT_EQ(Type(BoolWrapperType()), BoolWrapperType());
EXPECT_EQ(BoolWrapperType(), Type(BoolWrapperType()));
EXPECT_EQ(Type(BoolWrapperType()), Type(BoolWrapperType()));
} |
#include "eval/public/cel_type_registry.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/type_provider.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/value.h"
#include "eval/internal/interop.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "eval/public/structs/legacy_type_provider.h"
#include "google/protobuf/descriptor.h"
namespace google::api::expr::runtime {
namespace {
using cel::Type;
using cel::TypeFactory;
// Adapts a LegacyTypeProvider so it can be registered with the modern
// cel::TypeRegistry.  Every lookup is forwarded unmodified to the wrapped
// provider.  The adapter holds a non-owning reference: the wrapped provider
// must outlive it (guaranteed by CelTypeRegistry, which keeps providers in
// `legacy_type_providers_`).
class LegacyToModernTypeProviderAdapter : public LegacyTypeProvider {
 public:
  explicit LegacyToModernTypeProviderAdapter(const LegacyTypeProvider& provider)
      : provider_(provider) {}
  absl::optional<LegacyTypeAdapter> ProvideLegacyType(
      absl::string_view name) const override {
    return provider_.ProvideLegacyType(name);
  }
  absl::optional<const LegacyTypeInfoApis*> ProvideLegacyTypeInfo(
      absl::string_view name) const override {
    return provider_.ProvideLegacyTypeInfo(name);
  }
  absl::optional<const LegacyAnyPackingApis*> ProvideLegacyAnyPackingApis(
      absl::string_view name) const override {
    return provider_.ProvideLegacyAnyPackingApis(name);
  }

 private:
  const LegacyTypeProvider& provider_;
};
// Copies the (name, number) pairs of the enum described by `desc` into
// `registry`, keyed by the enum's fully qualified name.
void AddEnumFromDescriptor(const google::protobuf::EnumDescriptor* desc,
                           CelTypeRegistry& registry) {
  const int value_count = desc->value_count();
  std::vector<CelTypeRegistry::Enumerator> enumerators;
  enumerators.reserve(value_count);
  for (int i = 0; i < value_count; ++i) {
    const auto* value = desc->value(i);
    enumerators.push_back({value->name(), value->number()});
  }
  registry.RegisterEnum(desc->full_name(), std::move(enumerators));
}
}
CelTypeRegistry::CelTypeRegistry() = default;
// Registers all values of a protobuf enum so they resolve in expressions.
void CelTypeRegistry::Register(const google::protobuf::EnumDescriptor* enum_descriptor) {
  AddEnumFromDescriptor(enum_descriptor, *this);
}
// Registers a manually-specified enum; forwards to the modern registry,
// which is the single source of truth for resolveable enums.
void CelTypeRegistry::RegisterEnum(absl::string_view enum_name,
                                   std::vector<Enumerator> enumerators) {
  modern_type_registry_.RegisterEnum(enum_name, std::move(enumerators));
}
void CelTypeRegistry::RegisterTypeProvider(
std::unique_ptr<LegacyTypeProvider> provider) {
legacy_type_providers_.push_back(
std::shared_ptr<const LegacyTypeProvider>(std::move(provider)));
modern_type_registry_.AddTypeProvider(
std::make_unique<LegacyToModernTypeProviderAdapter>(
*legacy_type_providers_.back()));
}
// Returns the earliest-registered legacy type provider, or nullptr when
// none have been registered.
std::shared_ptr<const LegacyTypeProvider>
CelTypeRegistry::GetFirstTypeProvider() const {
  return legacy_type_providers_.empty() ? nullptr
                                        : legacy_type_providers_.front();
}
// Probes the registered legacy providers in registration order and returns
// the first adapter found for `fully_qualified_type_name`, or nullopt when
// no provider recognizes the name.
absl::optional<LegacyTypeAdapter> CelTypeRegistry::FindTypeAdapter(
    absl::string_view fully_qualified_type_name) const {
  for (const auto& provider : legacy_type_providers_) {
    if (auto adapter = provider->ProvideLegacyType(fully_qualified_type_name);
        adapter.has_value()) {
      return adapter;
    }
  }
  return absl::nullopt;
}
} | #include "eval/public/cel_type_registry.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_provider.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::MemoryManagerRef;
using ::cel::Type;
using ::cel::TypeFactory;
using ::cel::TypeManager;
using ::cel::TypeProvider;
using ::cel::ValueManager;
using testing::Contains;
using testing::Eq;
using testing::Key;
using testing::Optional;
using testing::Pair;
using testing::Truly;
using testing::UnorderedElementsAre;
using cel::internal::IsOkAndHolds;
using cel::internal::StatusIs;
// Fake provider that recognizes a fixed list of type names and returns an
// empty (nullptr, nullptr) adapter for each.
class TestTypeProvider : public LegacyTypeProvider {
 public:
  explicit TestTypeProvider(std::vector<std::string> types)
      : types_(std::move(types)) {}
  absl::optional<LegacyTypeAdapter> ProvideLegacyType(
      absl::string_view name) const override {
    for (const auto& type : types_) {
      if (name == type) {
        return LegacyTypeAdapter(nullptr, nullptr);
      }
    }
    return absl::nullopt;
  }

 private:
  std::vector<std::string> types_;
};
// Manually registered enums become resolveable by full name.
TEST(CelTypeRegistryTest, RegisterEnum) {
  CelTypeRegistry registry;
  registry.RegisterEnum("google.api.expr.runtime.TestMessage.TestEnum",
                        {
                            {"TEST_ENUM_UNSPECIFIED", 0},
                            {"TEST_ENUM_1", 10},
                            {"TEST_ENUM_2", 20},
                            {"TEST_ENUM_3", 30},
                        });
  EXPECT_THAT(registry.resolveable_enums(),
              Contains(Key("google.api.expr.runtime.TestMessage.TestEnum")));
}
// A fresh registry already knows the built-in NullValue enum.
TEST(CelTypeRegistryTest, TestRegisterBuiltInEnum) {
  CelTypeRegistry registry;
  ASSERT_THAT(registry.resolveable_enums(),
              Contains(Key("google.protobuf.NullValue")));
}
// GetFirstTypeProvider returns the earliest-registered provider only.
TEST(CelTypeRegistryTest, TestGetFirstTypeProviderSuccess) {
  CelTypeRegistry registry;
  registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
      std::vector<std::string>{"google.protobuf.Int64"}));
  registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
      std::vector<std::string>{"google.protobuf.Any"}));
  auto type_provider = registry.GetFirstTypeProvider();
  ASSERT_NE(type_provider, nullptr);
  ASSERT_TRUE(
      type_provider->ProvideLegacyType("google.protobuf.Int64").has_value());
  ASSERT_FALSE(
      type_provider->ProvideLegacyType("google.protobuf.Any").has_value());
}
TEST(CelTypeRegistryTest, TestGetFirstTypeProviderFailureOnEmpty) {
  CelTypeRegistry registry;
  auto type_provider = registry.GetFirstTypeProvider();
  ASSERT_EQ(type_provider, nullptr);
}
// FindTypeAdapter searches across all registered providers.
TEST(CelTypeRegistryTest, TestFindTypeAdapterFound) {
  CelTypeRegistry registry;
  registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
      std::vector<std::string>{"google.protobuf.Any"}));
  auto desc = registry.FindTypeAdapter("google.protobuf.Any");
  ASSERT_TRUE(desc.has_value());
}
TEST(CelTypeRegistryTest, TestFindTypeAdapterFoundMultipleProviders) {
  CelTypeRegistry registry;
  registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
      std::vector<std::string>{"google.protobuf.Int64"}));
  registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
      std::vector<std::string>{"google.protobuf.Any"}));
  auto desc = registry.FindTypeAdapter("google.protobuf.Any");
  ASSERT_TRUE(desc.has_value());
}
TEST(CelTypeRegistryTest, TestFindTypeAdapterNotFound) {
  CelTypeRegistry registry;
  auto desc = registry.FindTypeAdapter("missing.MessageType");
  EXPECT_FALSE(desc.has_value());
}
// Matches a cel::Type whose name() equals `name`.
MATCHER_P(TypeNameIs, name, "") {
  const Type& type = arg;
  *result_listener << "got typename: " << type->name();
  return type->name() == name;
}
// The registry's modern TypeProvider resolves built-in and well-known
// protobuf type names.
TEST(CelTypeRegistryTypeProviderTest, Builtins) {
  CelTypeRegistry registry;
  cel::common_internal::LegacyValueManager value_factory(
      MemoryManagerRef::ReferenceCounting(), registry.GetTypeProvider());
  ASSERT_OK_AND_ASSIGN(absl::optional<Type> bool_type,
                       value_factory.FindType("bool"));
  EXPECT_THAT(bool_type, Optional(TypeNameIs("bool")));
  ASSERT_OK_AND_ASSIGN(absl::optional<Type> timestamp_type,
                       value_factory.FindType("google.protobuf.Timestamp"));
  EXPECT_THAT(timestamp_type,
              Optional(TypeNameIs("google.protobuf.Timestamp")));
  ASSERT_OK_AND_ASSIGN(absl::optional<Type> int_wrapper_type,
                       value_factory.FindType("google.protobuf.Int64Value"));
  EXPECT_THAT(int_wrapper_type,
              Optional(TypeNameIs("google.protobuf.Int64Value")));
  ASSERT_OK_AND_ASSIGN(absl::optional<Type> json_struct_type,
                       value_factory.FindType("google.protobuf.Struct"));
  EXPECT_THAT(json_struct_type, Optional(TypeNameIs("map")));
  ASSERT_OK_AND_ASSIGN(absl::optional<Type> any_type,
                       value_factory.FindType("google.protobuf.Any"));
  EXPECT_THAT(any_type, Optional(TypeNameIs("google.protobuf.Any")));
}
}
} | std::shared_ptr<const LegacyTypeProvider>
CelTypeRegistry::GetFirstTypeProvider() const {
if (legacy_type_providers_.empty()) {
return nullptr;
}
return legacy_type_providers_[0];
} | TEST(CelTypeRegistryTest, TestGetFirstTypeProviderSuccess) {
CelTypeRegistry registry;
registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
std::vector<std::string>{"google.protobuf.Int64"}));
registry.RegisterTypeProvider(std::make_unique<TestTypeProvider>(
std::vector<std::string>{"google.protobuf.Any"}));
auto type_provider = registry.GetFirstTypeProvider();
ASSERT_NE(type_provider, nullptr);
ASSERT_TRUE(
type_provider->ProvideLegacyType("google.protobuf.Int64").has_value());
ASSERT_FALSE(
type_provider->ProvideLegacyType("google.protobuf.Any").has_value());
}
TEST(CelTypeRegistryTest, TestGetFirstTypeProviderFailureOnEmpty) {
CelTypeRegistry registry;
auto type_provider = registry.GetFirstTypeProvider();
ASSERT_EQ(type_provider, nullptr);
} |
#ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
#include <cstddef>
#include <cstring>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
// Provides a uniform interface over a hashtable `Policy`'s slot operations
// (construct / destroy / transfer / element), filling in a default transfer
// strategy when the policy does not supply one.
template <class Policy, class = void>
struct common_policy_traits {
  // The object type physically stored in a container slot.
  using slot_type = typename Policy::slot_type;
  // Reference type obtained when dereferencing a slot.
  using reference = decltype(Policy::element(std::declval<slot_type*>()));
  using value_type = typename std::remove_reference<reference>::type;
  // Constructs a value in an uninitialized `slot` from `args...`.
  template <class Alloc, class... Args>
  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
    Policy::construct(alloc, slot, std::forward<Args>(args)...);
  }
  // Destroys the value in `slot`.  The policy's return type is propagated so
  // destroy_is_trivial() below can inspect it.
  template <class Alloc>
  static auto destroy(Alloc* alloc, slot_type* slot) {
    return Policy::destroy(alloc, slot);
  }
  // Moves the value from `old_slot` into the uninitialized `new_slot`,
  // leaving `old_slot` destroyed.  Dispatches, in priority order, to: a
  // policy-provided `transfer`, a raw memcpy for trivially relocatable value
  // types, or move-construct + destroy (see transfer_impl overloads).
  template <class Alloc>
  static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
    transfer_impl(alloc, new_slot, old_slot, Rank2{});
  }
  template <class P = Policy>
  static auto element(absl::remove_const_t<slot_type>* slot)
      -> decltype(P::element(slot)) {
    return P::element(slot);
  }
  template <class P = Policy>
  static auto element(const slot_type* slot) -> decltype(P::element(slot)) {
    return P::element(slot);
  }
  // True iff transfer() resolves to the memcpy-based overload (Rank1),
  // which callers may exploit for bulk relocation.
  static constexpr bool transfer_uses_memcpy() {
    return std::is_same<decltype(transfer_impl<std::allocator<char>>(
                            nullptr, nullptr, nullptr, Rank2{})),
                        std::true_type>::value;
  }
  // True iff the policy's destroy() is a no-op (signalled by it returning
  // std::true_type).
  template <class Alloc>
  static constexpr bool destroy_is_trivial() {
    return std::is_same<decltype(destroy<Alloc>(nullptr, nullptr)),
                        std::true_type>::value;
  }

 private:
  // Tag types for ranked overload resolution: the call site passes Rank2
  // (most derived), so the highest-ranked viable overload wins as SFINAE
  // removes candidates.
  struct Rank0 {};
  struct Rank1 : Rank0 {};
  struct Rank2 : Rank1 {};
  // Preferred (Rank2): the policy supplies its own transfer.
  template <class Alloc, class P = Policy>
  static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
                            slot_type* old_slot,
                            Rank2) -> decltype(P::transfer(alloc, new_slot,
                                                           old_slot)) {
    return P::transfer(alloc, new_slot, old_slot);
  }
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
  // Next best (Rank1): bitwise copy for trivially relocatable value types.
  // `std::conditional_t<false, Alloc, value_type>` is simply `value_type`,
  // spelled in a dependent way so the enable_if is evaluated lazily per call
  // rather than when the class template is instantiated.
  template <class Alloc,
            typename = std::enable_if_t<absl::is_trivially_relocatable<
                std::conditional_t<false, Alloc, value_type>>::value>>
  static std::true_type transfer_impl(Alloc*, slot_type* new_slot,
                                      slot_type* old_slot, Rank1) {
    // launder() is needed to obtain a usable pointer to the storage of the
    // destination slot's (not yet formally constructed) object.
    std::memcpy(
        static_cast<void*>(std::launder(
            const_cast<std::remove_const_t<value_type>*>(&element(new_slot)))),
        static_cast<const void*>(&element(old_slot)), sizeof(value_type));
    return {};
  }
#endif
  // Fallback (Rank0): move-construct into the new slot, destroy the old.
  template <class Alloc>
  static void transfer_impl(Alloc* alloc, slot_type* new_slot,
                            slot_type* old_slot, Rank0) {
    construct(alloc, new_slot, std::move(element(old_slot)));
    destroy(alloc, old_slot);
  }
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/common_policy_traits.h"
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::testing::MockFunction;
using ::testing::AnyNumber;
using ::testing::ReturnRef;
using Slot = int;
// Minimal policy providing only the mandatory hooks (construct/destroy/
// element); every hook is routed through a table of std::function objects so
// the test fixture can intercept the calls.
struct PolicyWithoutOptionalOps {
  using slot_type = Slot;
  using key_type = Slot;
  using init_type = Slot;

  // Mutable set of hooks the fixture installs before each test.
  struct PolicyFunctions {
    std::function<void(void*, Slot*, Slot)> construct;
    std::function<void(void*, Slot*)> destroy;
    std::function<Slot&(Slot*)> element;
  };

  // Intentionally leaked singleton so the hooks outlive all tests.
  static PolicyFunctions* functions() {
    static PolicyFunctions* const hooks = new PolicyFunctions();
    return hooks;
  }

  static void construct(void* alloc, Slot* slot, Slot value) {
    functions()->construct(alloc, slot, value);
  }
  static void destroy(void* alloc, Slot* slot) {
    functions()->destroy(alloc, slot);
  }
  static Slot& element(Slot* slot) { return functions()->element(slot); }
};
// Same as the base policy, but additionally supplies the optional `transfer`
// hook, again routed through an interceptable std::function.
struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
  struct TransferFunctions {
    std::function<void(void*, Slot*, Slot*)> transfer;
  };

  // Intentionally leaked singleton holding the transfer hook.
  static TransferFunctions* transfer_fn() {
    static TransferFunctions* const hooks = new TransferFunctions();
    return hooks;
  }

  static void transfer(void* alloc, Slot* dst, Slot* src) {
    transfer_fn()->transfer(alloc, dst, src);
  }
};
// Policy whose transfer and destroy both return std::true_type — the signal
// common_policy_traits uses to report memcpy-able transfer and trivial
// destruction.
struct PolicyWithMemcpyTransferAndTrivialDestroy : PolicyWithoutOptionalOps {
  static std::true_type destroy(void*, Slot*) { return {}; }
  static std::true_type transfer(void*, Slot*, Slot*) { return {}; }
};
// Fixture wiring every policy hook into a gMock MockFunction, so individual
// tests can place EXPECT_CALLs on exactly how common_policy_traits forwards
// its arguments.
struct Test : ::testing::Test {
  Test() {
    PolicyWithoutOptionalOps::functions()->construct =
        [&](void* alloc_arg, Slot* slot_arg, Slot value_arg) {
          construct.Call(alloc_arg, slot_arg, std::move(value_arg));
        };
    PolicyWithoutOptionalOps::functions()->destroy =
        [&](void* alloc_arg, Slot* slot_arg) {
          destroy.Call(alloc_arg, slot_arg);
        };
    PolicyWithoutOptionalOps::functions()->element =
        [&](Slot* slot_arg) -> Slot& { return element.Call(slot_arg); };
    PolicyWithOptionalOps::transfer_fn()->transfer =
        [&](void* alloc_arg, Slot* dst_arg, Slot* src_arg) {
          return transfer.Call(alloc_arg, dst_arg, src_arg);
        };
  }

  std::allocator<Slot> alloc;  // distinct address passed as the allocator
  int a = 53;                  // slot the tests operate on

  MockFunction<void(void*, Slot*, Slot)> construct;
  MockFunction<void(void*, Slot*)> destroy;
  MockFunction<Slot&(Slot*)> element;
  MockFunction<void(void*, Slot*, Slot*)> transfer;
};
// construct() must forward allocator, slot, and value verbatim to the policy.
TEST_F(Test, construct) {
  EXPECT_CALL(construct, Call(&alloc, &a, 53));
  common_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
}
// destroy() must forward allocator and slot verbatim to the policy.
TEST_F(Test, destroy) {
  EXPECT_CALL(destroy, Call(&alloc, &a));
  common_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
}
// element() must return the very reference produced by the policy hook
// (identity of the referenced object, not just an equal value).
TEST_F(Test, element) {
  int stored = 0;
  EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(stored));
  EXPECT_EQ(&stored,
            &common_policy_traits<PolicyWithoutOptionalOps>::element(&a));
}
// When the policy has no transfer hook, the traits must emulate transfer as
// move-construct into the destination followed by destroy of the source.
TEST_F(Test, without_transfer) {
  int source = 42;
  EXPECT_CALL(element, Call(&a)).Times(AnyNumber()).WillOnce(ReturnRef(a));
  EXPECT_CALL(element, Call(&source)).WillOnce(ReturnRef(source));
  EXPECT_CALL(construct, Call(&alloc, &a, source)).Times(AnyNumber());
  EXPECT_CALL(destroy, Call(&alloc, &source)).Times(AnyNumber());
  common_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &source);
}
// When the policy does provide transfer, it must be called directly —
// no construct/destroy fallback.
TEST_F(Test, with_transfer) {
  int b = 42;
  EXPECT_CALL(transfer, Call(&alloc, &a, &b));
  common_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
}
// transfer_uses_memcpy() reflects whether the policy's transfer returns
// std::true_type: false for the std::function-backed policy, true for the
// trivially-relocatable one.
TEST(TransferUsesMemcpy, Basic) {
  EXPECT_FALSE(
      common_policy_traits<PolicyWithOptionalOps>::transfer_uses_memcpy());
  EXPECT_TRUE(
      common_policy_traits<
          PolicyWithMemcpyTransferAndTrivialDestroy>::transfer_uses_memcpy());
}
// destroy_is_trivial() reflects whether the policy's destroy returns
// std::true_type for the given allocator.
TEST(DestroyIsTrivial, Basic) {
  EXPECT_FALSE(common_policy_traits<PolicyWithOptionalOps>::destroy_is_trivial<
               std::allocator<char>>());
  EXPECT_TRUE(common_policy_traits<PolicyWithMemcpyTransferAndTrivialDestroy>::
                  destroy_is_trivial<std::allocator<char>>());
}
}
}
ABSL_NAMESPACE_END
} | static constexpr bool transfer_uses_memcpy() {
return std::is_same<decltype(transfer_impl<std::allocator<char>>(
nullptr, nullptr, nullptr, Rank2{})),
std::true_type>::value;
} | TEST(TransferUsesMemcpy, Basic) {
EXPECT_FALSE(
common_policy_traits<PolicyWithOptionalOps>::transfer_uses_memcpy());
EXPECT_TRUE(
common_policy_traits<
PolicyWithMemcpyTransferAndTrivialDestroy>::transfer_uses_memcpy());
} |
#include "xla/service/cpu/cpu_layout_assignment.h"
#include <cstdint>
#include <numeric>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
namespace {
using std::nullopt;
using std::optional;
using ShouldMakeOperandColMajorCache =
absl::flat_hash_map<const HloInstruction*, bool>;
}
static bool ShouldMakeAllUsersColMajor(const HloInstruction* instruction) {
for (auto* user : instruction->users()) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(*user);
if (!operand_idx || user->operand(*operand_idx) != instruction ||
absl::c_count(user->operands(), instruction) != 1) {
return false;
}
}
return true;
}
// Returns the operand index of `instruction` that should be made
// column-major, or nullopt. Only constant operands qualify, and the (per
// constant) decision of ShouldMakeAllUsersColMajor is memoized in `cache`
// so each constant is analyzed once.
static optional<int64_t> ShouldMakeOperandColumnMajor(
    ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
  optional<int64_t> operand_idx =
      ProfitableToMakeDotOperandColumnMajor(instruction);
  if (!operand_idx) {
    return nullopt;
  }
  const HloInstruction* operand = instruction.operand(*operand_idx);
  if (operand->opcode() != HloOpcode::kConstant) {
    return nullopt;
  }
  auto it = cache->find(operand);
  if (it == cache->end()) {
    // Compute lazily: only reached the first time this constant is seen.
    const bool make_col_major = ShouldMakeAllUsersColMajor(operand);
    auto insertion = cache->insert({operand, make_col_major});
    CHECK(insertion.second);
    it = insertion.first;
  }
  return it->second ? operand_idx : nullopt;
}
// Returns `shape` with every array subshape rewritten to a row-major
// (descending minor-to-major) layout. Non-array subshapes (e.g. tuples)
// are left untouched.
static Shape RowMajorShape(Shape shape) {
  ShapeUtil::ForEachMutableSubshape(
      &shape, [](Shape* subshape, const ShapeIndex&) {
        if (subshape->IsArray()) {
          // Fill minor-to-major in reverse so dimension 0 ends up major.
          std::vector<int64_t> minor_to_major(subshape->dimensions_size());
          std::iota(minor_to_major.rbegin(), minor_to_major.rend(), 0);
          *subshape->mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
        }
      });
  return shape;
}
// Returns a copy of `old_shape` with a column-major (ascending
// minor-to-major) layout. Assumes an array shape, since it writes the
// top-level layout directly.
static Shape ColMajorShape(const Shape& old_shape) {
  Shape col_major(old_shape);
  std::vector<int64_t> minor_to_major(col_major.dimensions_size());
  std::iota(minor_to_major.begin(), minor_to_major.end(), 0);
  *col_major.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
  return col_major;
}
// True when the backend requires row-major layouts on all operands and the
// result of `instr`: Eigen-implementable convolutions and dots, and the
// "TopK" custom call.
static bool OperandsAndResultMustHaveRowMajorLayout(
    const HloInstruction& instr,
    const TargetMachineFeatures& target_machine_features) {
  switch (instr.opcode()) {
    case HloOpcode::kConvolution:
      return PotentiallyImplementedAsEigenConvolution(instr,
                                                      target_machine_features);
    case HloOpcode::kDot:
      return DotOperandsAndResultMustHaveRowMajorLayout(
          instr, target_machine_features);
    case HloOpcode::kCustomCall:
      return instr.custom_call_target() == "TopK";
    default:
      return false;
  }
}
// Adds CPU-backend layout constraints for every instruction in the
// computation. Priority order per instruction: (1) row-major everywhere for
// Eigen-style ops, (2) column-major on a profitable constant dot operand,
// (3) collective-dimension-major for reduce-scatter/all-gather, (4) default
// row-major on remaining array operands.
absl::Status CpuLayoutAssignment::AddBackendConstraints(
    LayoutConstraints* constraints) {
  // Memoizes the per-constant column-major decision across instructions.
  ShouldMakeOperandColMajorCache cache;
  const HloComputation* computation = constraints->computation();
  for (auto* instruction : computation->instructions()) {
    if (OperandsAndResultMustHaveRowMajorLayout(*instruction,
                                                target_machine_features_)) {
      // Eigen-backed convolution/dot/TopK: pin result and all operands to
      // row-major.
      TF_RETURN_IF_ERROR(SetInstructionLayout(
          RowMajorShape(instruction->shape()), instruction));
      for (int i = 0; i < instruction->operand_count(); i++) {
        TF_RETURN_IF_ERROR(SetOperandLayout(
            RowMajorShape(instruction->operand(i)->shape()), instruction, i));
      }
    } else if (optional<int64_t> op_idx =
                   ShouldMakeOperandColumnMajor(&cache, *instruction)) {
      // Profitable constant dot operand: constrain only that operand to
      // column-major.
      const HloInstruction* op = instruction->operand(*op_idx);
      TF_RETURN_IF_ERROR(
          SetOperandLayout(ColMajorShape(op->shape()), instruction, *op_idx));
    } else if (instruction->opcode() == HloOpcode::kReduceScatter) {
      // Keep the scatter dimension major-most so the collective operates on
      // contiguous memory.
      auto ars = Cast<HloReduceScatterInstruction>(instruction);
      TF_RETURN_IF_ERROR(SetInstructionLayout(
          ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
          ars));
    } else if (instruction->opcode() == HloOpcode::kAllGather) {
      // Likewise for the all-gather dimension.
      auto ag = Cast<HloAllGatherInstruction>(instruction);
      TF_RETURN_IF_ERROR(SetInstructionLayout(
          ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
          ag));
    } else {
      // Default: row-major on every array operand that is not already
      // constrained and whose buffer is not forwarded to the output.
      for (int64_t operand_no = 0; operand_no < instruction->operand_count();
           ++operand_no) {
        if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
          continue;
        }
        if (AnyOperandBufferForwarded(instruction, operand_no)) {
          continue;
        }
        if (!instruction->operand(operand_no)->shape().IsArray()) {
          continue;
        }
        Shape operand_shape(
            RowMajorShape(instruction->operand(operand_no)->shape()));
        TF_RETURN_IF_ERROR(
            SetOperandLayout(operand_shape, instruction, operand_no));
      }
      // NOTE(review): nothing follows these guards in the loop body, so both
      // `continue`s are currently no-ops — presumably left over from code
      // that once set a result layout here. Verify against upstream before
      // removing.
      if (computation->parent()->entry_computation() == computation &&
          computation->root_instruction() == instruction) {
        continue;
      }
      if (!instruction->shape().IsArray()) {
        continue;
      }
    }
  }
  return absl::OkStatus();
}
}
} | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
// Harness for CPU layout-assignment tests: runs the pass over a module with
// fake target-machine features that always report Eigen's expected tensor
// alignment.
class CpuLayoutAssignmentTest : public HloTestBase {
 protected:
  // Runs CpuLayoutAssignment on `module` against the given entry layout and
  // expects it to succeed.
  void AssignLayouts(HloModule* module,
                     ComputationLayout* entry_computation_layout) {
    cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
        [](int64_t) {
          return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
        });
    cpu::CpuLayoutAssignment layout_assignment(entry_computation_layout,
                                               &target_machine_features);
    EXPECT_IS_OK(layout_assignment.Run(module).status());
  }
};
// A dot whose RHS is a constant used only by that dot: the constant should
// receive a column-major ({0, 1}) layout while everything else stays as
// requested, and no copies should be inserted.
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
  Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
  Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
  auto dot_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_shape, "param0"));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
  auto result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  // Constant RHS becomes column-major; LHS and result keep their layouts.
  EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
                                dot_lhs->shape().layout()));
  EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
                                dot_rhs->shape().layout()));
  EXPECT_TRUE(
      LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
  // The pass should have satisfied all constraints without inserting copies.
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// One constant feeding the RHS of two dots: since every user profits, the
// shared constant still becomes column-major, with no copies inserted.
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
  Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
  Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
  auto dot_a_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_shape, "param0"));
  auto dot_b_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(1, lhs_shape, "param1"));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
  auto dot_a_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
  auto dot_b_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
  builder.AddInstruction(HloInstruction::CreateBinary(
      result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  // Shared constant RHS is column-major; all other arrays stay rank-1 {0}.
  EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
                                dot_rhs->shape().layout()));
  for (HloInstruction* instruction :
       {dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
                                  instruction->shape().layout()));
  }
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// Same shared-constant setup, but with 2-D dots whose results feed a tuple:
// here all operands and results end up row-major ({1, 0}), again copy-free.
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  Shape result_a_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
  Shape result_b_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
  auto dot_a_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
  auto dot_b_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
  auto dot_a_result = builder.AddInstruction(
      CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
  auto dot_b_result = builder.AddInstruction(
      CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
  auto tuple_result = builder.AddInstruction(
      HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
  *computation_layout.mutable_parameter_layout(1) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
  AssignLayouts(module.get(), &computation_layout);
  // Every dot operand/result is row-major; no copies were needed.
  for (HloInstruction* instruction :
       {dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// A constant on the LHS of a 2-D dot: all participants end up row-major
// ({1, 0}) and no copies are inserted.
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  Shape result_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
  auto dot_lhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, rhs_shape, "param0"));
  auto dot_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// The dot's RHS is a get-tuple-element of a tuple constant (not directly a
// constant), so the column-major special case does not apply: everything
// stays row-major and copy-free.
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  Shape other_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
  auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
  Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
  auto dot_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_shape, "param0"));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
  auto dot_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// Results handed back by RunDotOutputFusion for layout verification.
struct DotOutputFusionLayoutAssignmentResult {
  bool layout_assignment_changed_something;  // Run()'s changed flag.
  const HloInstruction* dot_lhs_fusion_param;   // operand feeding the dot LHS
  const HloInstruction* dot_rhs_fusion_param;   // operand feeding the dot RHS
  const HloInstruction* addend_fusion_param;    // operand feeding the add
};
// Builds an (m,k)x(k,n) dot + add, output-fuses dot into the add (the dot
// sits at operand `dot_operand_idx_in_add` of the add), runs layout
// assignment, and returns the fusion's outer operands for inspection.
// m == 1 or n == 1 collapses the corresponding side to a rank-1 shape.
static absl::StatusOr<DotOutputFusionLayoutAssignmentResult> RunDotOutputFusion(
    HloModule* module, const std::string& test_name, int m, int k, int n,
    const int64_t dot_operand_idx_in_add) {
  DotOutputFusionLayoutAssignmentResult result;
  CHECK(dot_operand_idx_in_add == 0 || dot_operand_idx_in_add == 1);
  auto builder = HloComputation::Builder(test_name);
  Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
  Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
  Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
  if (m == 1) {
    // Vector-matrix product: rank-1 LHS and result.
    dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
    dot_shape = ShapeUtil::MakeShape(F32, {n});
  } else if (n == 1) {
    // Matrix-vector product: rank-1 RHS and result.
    dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
    dot_shape = ShapeUtil::MakeShape(F32, {m});
  }
  HloInstruction* dot_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
  HloInstruction* addend = builder.AddInstruction(
      HloInstruction::CreateParameter(1, dot_shape, "param1"));
  HloInstruction* dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(dot_rhs_shape)));
  HloInstruction* dot_result =
      builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
  HloInstruction* add_result;
  if (dot_operand_idx_in_add == 0) {
    add_result = builder.AddInstruction(HloInstruction::CreateBinary(
        dot_shape, HloOpcode::kAdd, dot_result, addend));
  } else {
    add_result = builder.AddInstruction(HloInstruction::CreateBinary(
        dot_shape, HloOpcode::kAdd, addend, dot_result));
  }
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  // Wrap the add in an output fusion, then pull the dot inside it.
  HloInstruction* fusion_instruction =
      module->entry_computation()->AddInstruction(HloInstruction::CreateFusion(
          dot_shape, HloInstruction::FusionKind::kOutput, add_result));
  TF_RETURN_IF_ERROR(
      computation->ReplaceInstruction(add_result, fusion_instruction));
  HloInstruction* fused_add =
      fusion_instruction->fused_instructions_computation()->root_instruction();
  HloInstruction* fused_dot = fusion_instruction->FuseInstruction(dot_result);
  TF_RETURN_IF_ERROR(
      computation->RemoveInstructionAndUnusedOperands(dot_result));
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_lhs_shape));
  *computation_layout.mutable_parameter_layout(1) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
  // Map the fused parameters back to the fusion's outer operands so callers
  // can check the layouts assigned to them.
  result.dot_lhs_fusion_param =
      fusion_instruction->operand(fused_dot->operand(0)->parameter_number());
  result.dot_rhs_fusion_param =
      fusion_instruction->operand(fused_dot->operand(1)->parameter_number());
  result.addend_fusion_param = fusion_instruction->operand(
      fused_add->operand(1 - dot_operand_idx_in_add)->parameter_number());
  cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
      [](int64_t shape_size) {
        return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
      });
  cpu::CpuLayoutAssignment layout_assignment(&computation_layout,
                                             &target_machine_features);
  TF_ASSIGN_OR_RETURN(result.layout_assignment_changed_something,
                      layout_assignment.Run(module));
  return result;
}
// Checks the layouts assigned around a dot-output fusion: the RHS is
// column-major or row-major per `expect_col_major_dot_rhs` (rank-1 shapes
// only have the trivial {0} layout), LHS and addend are row-major
// (descending), and no copies were inserted anywhere.
static void AssertCorrectLayoutForDotOutputFusion(
    const HloComputation* computation,
    const DotOutputFusionLayoutAssignmentResult& layout_assignment_result,
    bool expect_col_major_dot_rhs) {
  Layout expected_dot_rhs_layout = expect_col_major_dot_rhs
                                       ? LayoutUtil::MakeLayout({0, 1})
                                       : LayoutUtil::MakeLayout({1, 0});
  if (layout_assignment_result.dot_rhs_fusion_param->shape().rank() == 1) {
    expected_dot_rhs_layout = LayoutUtil::MakeLayout({0});
  }
  EXPECT_TRUE(LayoutUtil::Equal(
      expected_dot_rhs_layout,
      layout_assignment_result.dot_rhs_fusion_param->shape().layout()));
  EXPECT_TRUE(LayoutUtil::Equal(
      LayoutUtil::MakeDescendingLayout(
          layout_assignment_result.dot_lhs_fusion_param->shape().rank()),
      layout_assignment_result.dot_lhs_fusion_param->shape().layout()));
  EXPECT_TRUE(LayoutUtil::Equal(
      LayoutUtil::MakeDescendingLayout(
          layout_assignment_result.addend_fusion_param->shape().rank()),
      layout_assignment_result.addend_fusion_param->shape().layout()));
  EXPECT_THAT(computation->instructions(), Each(Not(op::Copy())));
}
// Vector-matrix dot (m=1): constant RHS should be column-major.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_0) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
// Same vector-matrix case with the dot as the add's second operand.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_1) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
// Matrix-vector dot (n=1): RHS is rank-1, so no column-major layout.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_0) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
// Same matrix-vector case with the dot as the add's second operand.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_1) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
// Full matrix-matrix dot: RHS stays row-major.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_0) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
// Full matrix-matrix dot with the dot as the add's second operand.
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_1) {
  std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
  TF_ASSERT_OK_AND_ASSIGN(
      DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
// A batch dot whose requested result layout is NOT row-major: the pass must
// force a row-major dot and insert a copy converting to the requested
// {1, 2, 0} output layout.
TEST_F(CpuLayoutAssignmentTest, BatchDotLayoutMustBeRowMajor) {
  const char* hlo_string = R"(
HloModule BatchDotLayoutMustBeRowMajor
ENTRY BatchDotLayoutMustBeRowMajor {
p0 = f32[10,1,10] parameter(0)
p1 = f32[10,10,1] parameter(1)
ROOT dot = f32[10,1,1] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* computation = module->entry_computation();
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) = ShapeLayout(
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 10}, {2, 1, 0}));
  *computation_layout.mutable_parameter_layout(1) = ShapeLayout(
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 1}, {2, 1, 0}));
  // Deliberately non-row-major result layout to force a conversion copy.
  *computation_layout.mutable_result_layout() = ShapeLayout(
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {1, 2, 0}));
  AssignLayouts(module.get(), &computation_layout);
  Shape expected_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {2, 1, 0});
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::ShapeWithLayout(expected_shape)));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(op::Dot(
          op::ShapeWithLayout(computation_layout.parameter_layout(0).shape()),
          op::ShapeWithLayout(
              computation_layout.parameter_layout(1).shape()))));
}
}
} | static optional<int64_t> ShouldMakeOperandColumnMajor(
ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(instruction);
if (!operand_idx) {
return nullopt;
}
const HloInstruction* operand = instruction.operand(*operand_idx);
if (operand->opcode() != HloOpcode::kConstant) {
return nullopt;
}
auto it = cache->find(operand);
if (it == cache->end()) {
auto insert_result =
cache->insert({operand, ShouldMakeAllUsersColMajor(operand)});
CHECK(insert_result.second);
it = insert_result.first;
}
return it->second ? operand_idx : nullopt;
} | TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
dot_lhs->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
// NOTE(review): this appears to be a duplicated excerpt of the
// MultipleDotsWithSameConstantRhsTensor0 test defined earlier in this
// chunk — a second definition with the same name cannot coexist in one
// translation unit; confirm and deduplicate.
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
  Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
  Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
  auto dot_a_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_shape, "param0"));
  auto dot_b_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(1, lhs_shape, "param1"));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
  auto dot_a_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
  auto dot_b_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
  builder.AddInstruction(HloInstruction::CreateBinary(
      result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
                                dot_rhs->shape().layout()));
  for (HloInstruction* instruction :
       {dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
                                  instruction->shape().layout()));
  }
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// Variant of the test above where both dots produce matrices (not vectors)
// and the result is a tuple: every dot operand, including the shared constant
// RHS, should end up with the default row-major {1, 0} layout, again with no
// copies inserted.
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
  auto builder = HloComputation::Builder(TestName());
  // All shapes start with a column-major {0, 1} layout; the pass is expected
  // to override them with the default layout chosen for the entry interface.
  Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  Shape result_a_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
  Shape result_b_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
  auto dot_a_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
  auto dot_b_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
  // Single constant shared by both dots as RHS.
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
  auto dot_a_result = builder.AddInstruction(
      CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
  auto dot_b_result = builder.AddInstruction(
      CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
  auto tuple_result = builder.AddInstruction(
      HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
  *computation_layout.mutable_parameter_layout(1) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
  AssignLayouts(module.get(), &computation_layout);
  // Everything, including the shared constant, converges on row-major {1, 0}.
  for (HloInstruction* instruction :
       {dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  // No layout conversion (kCopy) should have been inserted.
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// A dot whose LHS (rather than RHS) is a constant: the constant should still
// be assigned the default row-major layout and no copies should appear.
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
  auto builder = HloComputation::Builder(TestName());
  // Shapes are created column-major {0, 1}; the pass should flip them.
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  Shape result_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
  // Here the LHS is the constant and the RHS is the entry parameter.
  auto dot_lhs = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, rhs_shape, "param0"));
  auto dot_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  // All three dot instructions end up row-major {1, 0}.
  for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  // No layout conversion (kCopy) should have been inserted.
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
// The dot's RHS is a constant reached through a GetTupleElement rather than
// directly: layout assignment should still propagate the row-major layout
// through the GTE and avoid inserting copies.
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
  auto builder = HloComputation::Builder(TestName());
  Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
  Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
  // Extra tuple element so the RHS is not the only (or first) constant leaf.
  Shape other_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
  auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
  // Result shape intentionally has no layout; the pass must pick one.
  Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
  auto dot_lhs = builder.AddInstruction(
      HloInstruction::CreateParameter(0, lhs_shape, "param0"));
  // RHS comes out of the tuple constant via GTE at index 1.
  auto dot_rhs = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
  auto dot_result = builder.AddInstruction(
      CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
  *computation_layout.mutable_result_layout() =
      ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
  AssignLayouts(module.get(), &computation_layout);
  // LHS, the GTE-produced RHS, and the dot all get row-major {1, 0}.
  for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
    EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
                                  instruction->shape().layout()));
  }
  // No layout conversion (kCopy) should have been inserted.
  for (const auto& instruction : computation->instructions()) {
    EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
  }
}
#include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/utils/device_caps_utils.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/tpu_xplane_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindTensorCorePlanes;
// Returns the hostname recorded in `space`, or "localhost" when the XSpace
// carries no hostname at all. An XSpace is expected to come from exactly one
// host; multiple hostnames trip the DCHECK in debug builds.
std::string Hostname(const XSpace& space) {
  if (space.hostnames().empty()) return "localhost";
  DCHECK_EQ(space.hostnames_size(), 1);
  return space.hostnames(0);
}
}
// Builds a PerfEnv proto from a peak compute rate (TFLOP/s) and a list of
// peak memory bandwidths (GiB/s), one entry per MemBwType slot.
//
// The ridge point — the operational intensity at which a kernel transitions
// from memory-bound to compute-bound in a roofline model — is peak GFLOP/s
// divided by the peak HBM read/write bandwidth.
PerfEnv MakePerfEnv(double peak_tera_flops_per_second,
                    std::vector<double> peak_bws) {
  PerfEnv result;
  result.set_peak_tera_flops_per_second(peak_tera_flops_per_second);
  for (const auto bw : peak_bws) {
    result.add_peak_bws_giga_bytes_per_second(bw);
  }
  // Guard the HBM index: indexing a too-short vector would be undefined
  // behavior. When the entry is missing, leave ridge_point at its proto
  // default instead of crashing.
  if (peak_bws.size() >
      static_cast<size_t>(MemBwType::MEM_BW_TYPE_HBM_RW)) {
    result.set_ridge_point(
        tsl::profiler::TeraToGiga(peak_tera_flops_per_second) /
        peak_bws[MemBwType::MEM_BW_TYPE_HBM_RW]);
  }
  return result;
}
// Derives the device performance envelope (peak FLOP/s and memory
// bandwidths) from a device XPlane.
//
// GPU (and anything not TPU-prefixed): compute the envelope from the device
// capabilities stats; the single memory bandwidth value is replicated into
// all four MemBwType slots. TPU: read the peak rates that the profiler
// recorded directly as plane-level stats, defaulting each to 0.0 if absent.
PerfEnv GetPerfEnvFromXPlane(const XPlane& device_plane) {
  DeviceCapabilities cap = GetDeviceCaps(device_plane);
  if (!absl::StartsWith(device_plane.name(), kTpuPlanePrefix)) {
    return MakePerfEnv(
        tsl::profiler::GigaToTera(GetFlopMaxThroughputPerSM(cap)) *
            cap.num_cores(),
        {tsl::profiler::UniToGiga(cap.memory_bandwidth()),
         tsl::profiler::UniToGiga(cap.memory_bandwidth()),
         tsl::profiler::UniToGiga(cap.memory_bandwidth()),
         tsl::profiler::UniToGiga(cap.memory_bandwidth())});
  } else {
    XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(&device_plane);
    // Reads a double-valued device-capability stat, defaulting to 0.0 when
    // the stat is absent. Replaces four copy-pasted stanzas.
    auto stat_or_zero = [&visitor](StatType stat_type) {
      auto stat = visitor.GetStat(stat_type);
      return stat.has_value() ? stat->DoubleValue() : 0.0;
    };
    return MakePerfEnv(
        stat_or_zero(StatType::kDevCapPeakTeraflopsPerSecond),
        {stat_or_zero(StatType::kDevCapPeakHbmBwGigabytesPerSecond),
         stat_or_zero(StatType::kDevCapPeakSramRdBwGigabytesPerSecond),
         stat_or_zero(StatType::kDevCapPeakSramWrBwGigabytesPerSecond)});
  }
}
// Fills in the RunEnvironment (host/device inventory) for a single-host
// XSpace. Device type is resolved in priority order: GPU planes first, then
// TensorCore (TPU) planes, then CPU-only.
void SetRunEnvironment(const XSpace& space, RunEnvironment* env) {
  // A single XSpace always represents one host / one task.
  env->set_host_count(1);
  env->set_task_count(1);
  env->mutable_hostnames()->insert({Hostname(space), true});
  std::vector<const XPlane*> gpu_planes =
      FindPlanesWithPrefix(space, kGpuPlanePrefix);
  if (!gpu_planes.empty()) {
    // Prefer the concrete GPU model name from device caps; fall back to the
    // generic "GPU" label when the model is unknown.
    absl::string_view gpu_model =
        GpuModelName(GetDeviceCaps(*gpu_planes.front()));
    if (!gpu_model.empty()) {
      env->set_device_type(std::string(gpu_model));
    } else {
      env->set_device_type("GPU");
    }
    env->set_device_core_count(gpu_planes.size());
  } else if (std::vector<const XPlane*> tpu_planes =
                 FindTensorCorePlanes(space);
             !tpu_planes.empty()) {
    // TPU: the device type string is recorded as a stat on the first plane.
    XPlaneVisitor visitor =
        tsl::profiler::CreateTfXPlaneVisitor(tpu_planes.at(0));
    auto xstat = visitor.GetStat(StatType::kDeviceTypeString);
    if (xstat.has_value()) {
      env->set_device_type(std::string(xstat->StrOrRefValue()));
    }
    env->set_device_core_count(tpu_planes.size());
  } else {
    // No accelerator planes: host-only profile.
    env->set_device_type("CPU");
    env->set_device_core_count(0);
  }
}
void PropagateXSpaceDiagnosticsToOpStats(const XSpace& space,
OpStats* op_stats) {
if (!space.errors().empty()) {
absl::flat_hash_set<std::string> unique_errors;
unique_errors.insert(space.errors().begin(), space.errors().end());
*op_stats->mutable_diagnostics()->mutable_errors() = {unique_errors.begin(),
unique_errors.end()};
}
if (!space.warnings().empty()) {
absl::flat_hash_set<std::string> unique_warnings;
unique_warnings.insert(space.warnings().begin(), space.warnings().end());
*op_stats->mutable_diagnostics()->mutable_warnings() = {
unique_warnings.begin(), unique_warnings.end()};
}
}
// Records, for every HLO program captured in `hlo_proto_map`, the mapping
// from its program id to the HLO module name in `op_stats`.
void SetProgramIdToNameMap(const HloProtoMap& hlo_proto_map,
                           tensorflow::profiler::OpStats& op_stats) {
  auto& id_to_name = *op_stats.mutable_program_id_to_name_map();
  for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
    id_to_name[program_id] = hlo_proto->hlo_module().name();
  }
}
// Converts one host's XSpace into an OpStats proto, producing only the
// sections requested by `options` (op metrics DB, step DB, kernel stats DB).
// The flow is: classify the device planes (TPU vs GPU), fold per-device
// metrics/step/kernel data, then merge in host-thread data and finalize the
// step database.
OpStats ConvertXSpaceToOpStats(const XSpace& space,
                               const OpStatsOptions& options) {
  // TensorCore planes take priority; only fall back to GPU planes when the
  // profile contains no TPU data.
  std::vector<const XPlane*> device_planes = FindTensorCorePlanes(space);
  bool is_tpu = !device_planes.empty();
  if (!is_tpu) {
    device_planes = FindPlanesWithPrefix(space, kGpuPlanePrefix);
  }
  OpStats op_stats;
  StepEvents step_events;
  PropagateXSpaceDiagnosticsToOpStats(space, &op_stats);
  OpMetricsDbCombiner op_metrics_db_combiner(
      op_stats.mutable_device_op_metrics_db());
  SetRunEnvironment(space, op_stats.mutable_run_environment());
  KernelReportMap reports;
  for (const XPlane* device_trace : device_planes) {
    // TPU traces are aggregated once per plane and the aggregate is reused
    // for step-event extraction below.
    XPlane aggregated_xplane;
    bool use_aggregated_xplane = false;
    if (options.generate_op_metrics_db) {
      // Perf env is taken from the first device plane only.
      if (!op_stats.has_perf_env()) {
        *op_stats.mutable_perf_env() = GetPerfEnvFromXPlane(*device_trace);
      }
      if (!is_tpu) {
        OpMetricsDb device_op_metrics_db =
            ConvertDeviceTraceXPlaneToOpMetricsDb(*device_trace);
        op_metrics_db_combiner.Combine(device_op_metrics_db);
      } else {
        AggregateXPlane(*device_trace, aggregated_xplane);
        use_aggregated_xplane = true;
        OpMetricsDb device_op_metrics_db =
            ConvertTpuDeviceTraceXPlaneToOpMetricsDb(aggregated_xplane);
        op_metrics_db_combiner.Combine(device_op_metrics_db);
      }
    }
    if (options.generate_step_db) {
      StepEvents device_step_events = ConvertDeviceTraceXPlaneToStepEvents(
          use_aggregated_xplane ? aggregated_xplane : *device_trace);
      // TPU steps must be present on every core (intersection); GPU steps
      // are merged across devices (union).
      if (is_tpu) {
        IntersectCombineStepEvents(device_step_events, &step_events);
      } else {
        UnionCombineStepEvents(device_step_events, &step_events);
      }
    }
    if (options.generate_kernel_stats_db) {
      ConvertDeviceTraceXPlaneToKernelReports(*device_trace,
                                              {}, &reports);
    }
  }
  if (options.generate_kernel_stats_db) {
    // Only the top-K longest kernels are kept in the output proto.
    CopyTopKDurationKernelReportsToDb(reports,
                                      op_stats.mutable_kernel_stats_db());
  }
  bool has_device = !device_planes.empty();
  const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
  if (host_plane) {
    if (options.generate_op_metrics_db) {
      *op_stats.mutable_host_op_metrics_db() =
          ConvertHostThreadsXPlaneToOpMetricsDb(*host_plane);
    }
    // Host-derived step events are only used for CPU-only profiles.
    if (options.generate_step_db && !has_device) {
      StepEvents host_step_events =
          ConvertHostThreadsXPlaneToStepEvents(*host_plane, nullptr);
      UnionCombineStepEvents(host_step_events, &step_events);
    }
    XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
    auto stat = visitor.GetStat(StatType::kMatrixUnitUtilizationPercent);
    if (stat.has_value()) {
      op_stats.mutable_performance_counter_result()
          ->set_matrix_unit_utilization_percent(stat->DoubleValue());
    }
  }
  if (options.generate_step_db) {
    if (is_tpu) {
      // TPU step events are already per-core aligned; use them directly.
      *op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
          has_device, options.maybe_drop_incomplete_steps, step_events);
      *op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
          ComputePrecisionStats(step_events);
    } else {
      // GPU/CPU step events may overlap across streams; de-overlap first.
      StepEvents nonoverlapped_step_events =
          ToNonOverlappedStepEvents(step_events);
      *op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
          has_device, options.maybe_drop_incomplete_steps,
          nonoverlapped_step_events);
      *op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
          ComputePrecisionStats(nonoverlapped_step_events);
    }
  }
  if (!is_tpu) {
    // Attach the hostname to the default GPU core entry so multi-host
    // combining can tell results apart.
    CoreDetails& details =
        (*op_stats.mutable_core_id_to_details())[kDefaultGpuLocalCoreId];
    details.set_hostname(Hostname(space));
  }
  HloProtoMap hlo_proto_map;
  hlo_proto_map.AddHloProtosFromXSpace(space);
  SetProgramIdToNameMap(hlo_proto_map, op_stats);
  return op_stats;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/multi_xplanes_to_op_stats.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/group_events.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
// GPU path of GetPerfEnvFromXPlane: device-cap stats on a GPU plane must
// yield the expected peak TFLOP/s, HBM bandwidth, and ridge point.
TEST(ConvertXPlaneToOpStats, GpuPerfEnv) {
  auto space = std::make_unique<XSpace>();
  constexpr double kMaxError = 0.01;
  constexpr int kClockRateKHz = 1530000;
  constexpr int kCoreCount = 80;
  constexpr uint64 kMemoryBandwidthBytesPerSecond =
      uint64{900} * 1000 * 1000 * 1000;
  // V100-class capabilities (compute capability 7.0).
  constexpr int kComputeCapMajor = 7;
  constexpr int kComputeCapMinor = 0;
  XPlaneBuilder device_plane(
      GetOrCreateGpuXPlane(space.get(), 0));
  device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata(
                                GetStatTypeStr(StatType::kDevVendor)),
                            kDeviceVendorNvidia);
  device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
                            kClockRateKHz);
  device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
                            kCoreCount);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
      kMemoryBandwidthBytesPerSecond);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("compute_cap_major"),
      kComputeCapMajor);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
      kComputeCapMinor);
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const PerfEnv& perf_env = op_stats.perf_env();
  // 80 SMs * 1.53 GHz * 2 fp32 FLOPs/cycle/core... yields ~141 TFLOP/s;
  // ridge point = 141000 GFLOP/s / 900 GiB/s ~= 156.67.
  EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
  EXPECT_NEAR(
      900,
      perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
      kMaxError);
  EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
// Two GPU planes with an Nvidia vendor stat: RunEnvironment should report
// the vendor-derived device type and a core count of two.
TEST(ConvertXPlaneToOpStats, GpuRunEnvironment) {
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder device_plane1(
      GetOrCreateGpuXPlane(space.get(), 0));
  device_plane1.AddStatValue(*device_plane1.GetOrCreateStatMetadata(
                                 GetStatTypeStr(StatType::kDevVendor)),
                             kDeviceVendorNvidia);
  XPlaneBuilder device_plane2(
      GetOrCreateGpuXPlane(space.get(), 1));
  device_plane2.AddStatValue(*device_plane2.GetOrCreateStatMetadata(
                                 GetStatTypeStr(StatType::kDevVendor)),
                             kDeviceVendorNvidia);
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
      session_snapshot_or.value(), OpStatsOptions(), &op_stats));
  const RunEnvironment& run_env = op_stats.run_environment();
  EXPECT_EQ("Nvidia GPU", run_env.device_type());
  EXPECT_EQ(1, run_env.host_count());
  EXPECT_EQ(1, run_env.task_count());
  EXPECT_EQ(2, run_env.device_core_count());
}
// With no device planes, step events must be derived from host threads:
// a TraceContext/FunctionRun pair wired (via producer/consumer stats) to an
// ExecutorStateProcess should produce exactly one step.
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
  constexpr int64_t kStepNum = 123;
  constexpr int64_t kStepId = 0;
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
  host_plane_builder.ReserveLines(2);
  // Line 0: the main thread launching the step.
  auto main_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
               0, 100, {{StatType::kStepNum, kStepNum}});
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
               10, 90,
               {{StatType::kStepId, kStepId},
                {StatType::kProducerType, int64_t{1}},
                {StatType::kProducerId, kStepId}});
  // Line 1: the TF executor thread consuming the step and running an op.
  auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &tf_executor_thread,
               HostEventType::kExecutorStateProcess, 20, 80,
               {{StatType::kStepId, kStepId},
                {StatType::kConsumerType, int64_t{1}},
                {StatType::kConsumerId, kStepId}});
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const StepDatabaseResult& step_db = op_stats.step_db();
  EXPECT_EQ(step_db.step_sequence_size(), 1);
}
// When a GPU plane is present, host step events correlated (via
// kCorrelationId) to device kernels drive the step DB and precision stats.
TEST(ConvertXPlaneToOpStats, GpuStepDbTest) {
  constexpr int64_t kStepNum = 123;
  constexpr int64_t kStepId = 0;
  constexpr int64_t kCorrelationId = 100;
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
  host_plane_builder.ReserveLines(2);
  auto main_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
               0, 100, {{StatType::kStepNum, kStepNum}});
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
               10, 90,
               {{StatType::kStepId, kStepId},
                {StatType::kProducerType, int64_t{1}},
                {StatType::kProducerId, kStepId}});
  auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &tf_executor_thread,
               HostEventType::kExecutorStateProcess, 20, 20,
               {{StatType::kStepId, kStepId},
                {StatType::kConsumerType, int64_t{1}},
                {StatType::kConsumerId, kStepId}});
  // Host-side kernel launch, linked to the device event by correlation id.
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
               {{StatType::kCorrelationId, kCorrelationId}});
  XPlaneBuilder device_plane_builder(
      GetOrCreateGpuXPlane(space.get(), 0));
  device_plane_builder.ReserveLines(1);
  auto stream = device_plane_builder.GetOrCreateLine(0);
  // Device-side execution of the same kernel (40ns of fp32 compute).
  CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
               {{StatType::kCorrelationId, kCorrelationId}});
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const StepDatabaseResult& step_db = op_stats.step_db();
  // No complete step markers on the device → empty step sequence, but the
  // device kernel still contributes to precision stats.
  EXPECT_EQ(step_db.step_sequence_size(), 0);
  PrecisionStats precision_stats =
      op_stats.device_op_metrics_db().precision_stats();
  EXPECT_EQ(precision_stats.compute_16bit_ps(), 0);
  EXPECT_EQ(precision_stats.compute_32bit_ps(), 40);
}
// Duplicate error strings in the XSpace must collapse to a single
// diagnostics entry in the resulting OpStats.
TEST(ConvertXPlaneToOpStats, PropagateAndDedupErrors) {
  XSpace space;
  static constexpr char kError[] = "host: error";
  // Record the identical error message twice.
  for (int i = 0; i < 2; ++i) {
    *space.add_errors() = kError;
  }
  OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
  EXPECT_EQ(1, op_stats.diagnostics().errors_size());
  EXPECT_EQ(kError, op_stats.diagnostics().errors(0));
}
// The XSpace hostname must be propagated onto the default GPU core's
// CoreDetails entry.
TEST(ConvertXPlaneToOpStats, Hostnames) {
  XSpace space;
  static constexpr char kHost[] = "host1";
  *space.add_hostnames() = kHost;
  OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
  const auto& details =
      op_stats.core_id_to_details().at(kDefaultGpuLocalCoreId);
  EXPECT_EQ(kHost, details.hostname());
}
// Builds a minimal host-only XSpace containing one complete step (step 123)
// whose executor runs a single "aaa:bbb" op, and tags it with `hostname`.
// Shared fixture for the multi-host combining test below.
void BuildXSpaceForTest(XSpace& xspace, absl::string_view hostname) {
  constexpr int64_t kStepNum = 123;
  constexpr int64_t kStepId = 456;
  XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&xspace));
  host_plane_builder.ReserveLines(2);
  auto main_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
               0, 100, {{StatType::kStepNum, kStepNum}});
  // Producer side of the step (FunctionRun on the main thread)...
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
               10, 90,
               {{StatType::kStepId, kStepId},
                {StatType::kProducerType, int64_t{1}},
                {StatType::kProducerId, kStepId}});
  auto executor_thread = host_plane_builder.GetOrCreateLine(1);
  // ...matched by the consumer side on the executor thread.
  CreateXEvent(&host_plane_builder, &executor_thread,
               HostEventType::kExecutorStateProcess, 20, 80,
               {{StatType::kStepId, kStepId},
                {StatType::kConsumerType, int64_t{1}},
                {StatType::kConsumerId, kStepId}});
  // "name:category" event — split into name "aaa" / category "bbb" later.
  CreateXEvent(&host_plane_builder, &executor_thread, "aaa:bbb", 30, 70);
  xspace.add_hostnames(std::string(hostname));
}
// Combining two single-host XSpaces: op metrics accumulate across hosts, the
// shared step aligns into one step sequence with per-core entries for both
// hosts, and each host's CoreDetails keeps its own hostname.
TEST(ConvertXPlaneToOpStats, TestConvertMultiXSpacesToCombinedOpStats) {
  static constexpr char kHost1[] = "host1";
  static constexpr char kHost2[] = "host2";
  auto xspace1 = std::make_unique<XSpace>();
  auto xspace2 = std::make_unique<XSpace>();
  BuildXSpaceForTest(*xspace1, kHost1);
  BuildXSpaceForTest(*xspace2, kHost2);
  std::vector<std::string> xspace_paths;
  xspace_paths.push_back("host1.pb");
  xspace_paths.push_back("host2.pb");
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(xspace1));
  xspaces.push_back(std::move(xspace2));
  auto session_snapshot_or =
      SessionSnapshot::Create(std::move(xspace_paths), std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  OpStats combined_op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &combined_op_stats))
      << "Failed to convert multi XSpace to OpStats";
  // The "aaa:bbb" op appears on both hosts: 70ns self time each → 140 total.
  ASSERT_EQ(combined_op_stats.host_op_metrics_db().metrics_db_size(), 2);
  const auto& metric = combined_op_stats.host_op_metrics_db().metrics_db(1);
  EXPECT_EQ(metric.name(), "aaa");
  EXPECT_EQ(metric.category(), "bbb");
  EXPECT_EQ(metric.self_time_ps(), 140);
  // One step, with one per-core entry per host (host 2's core ids are
  // offset by 1000 during combining).
  ASSERT_EQ(combined_op_stats.step_db().step_sequence_size(), 1);
  ASSERT_EQ(
      combined_op_stats.step_db().step_sequence(0).step_info_per_core_size(),
      2);
  const auto& step_info_per_core =
      combined_op_stats.step_db().step_sequence(0).step_info_per_core();
  EXPECT_TRUE(step_info_per_core.contains(kDefaultGpuLocalCoreId));
  EXPECT_TRUE(step_info_per_core.contains(1000 + kDefaultGpuLocalCoreId));
  const auto& core_details_map = combined_op_stats.core_id_to_details();
  EXPECT_EQ(kHost1, core_details_map.at(kDefaultGpuLocalCoreId).hostname());
  EXPECT_EQ(kHost2,
            core_details_map.at(1000 + kDefaultGpuLocalCoreId).hostname());
}
// Four TPU planes: RunEnvironment should report the TPU device type string
// and count one core per plane.
TEST(ConvertXPlaneToOpStats, RunEnvironmentExtractedFromTpuPlane) {
  XSpace xspace;
  constexpr int kNumCores = 4;
  for (int core = 0; core < kNumCores; ++core) {
    GetOrCreateTpuXPlane(&xspace, core, "TPU V4", 0, 0);
  }
  OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
  EXPECT_EQ(op_stats.run_environment().device_type(), "TPU V4");
  EXPECT_EQ(op_stats.run_environment().device_core_count(), kNumCores);
}
// TPU path of GetPerfEnvFromXPlane: the peak rates come from the plane-level
// TPU capability stats, not the GPU-style device caps (the GPU-style stats
// added below should be ignored for TPU planes).
TEST(ConvertXPlaneToOpStats, TpuPerfEnv) {
  auto space = std::make_unique<XSpace>();
  constexpr double kMaxError = 0.01;
  // GPU-style capability stats — present but irrelevant on a TPU plane.
  constexpr int kClockRateKHz = 1530000;
  constexpr int kCoreCount = 80;
  constexpr uint64 kMemoryBandwidthBytesPerSecond =
      uint64{900} * 1000 * 1000 * 1000;
  constexpr int kComputeCapMajor = 7;
  constexpr int kComputeCapMinor = 0;
  // TPU capability stats that actually drive the PerfEnv.
  constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
  constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 900.0;
  XPlaneBuilder device_plane(GetOrCreateTpuXPlane(
      space.get(), 0, "TPU V4",
      kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
  device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
                            kClockRateKHz);
  device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
                            kCoreCount);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
      kMemoryBandwidthBytesPerSecond);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("compute_cap_major"),
      kComputeCapMajor);
  device_plane.AddStatValue(
      *device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
      kComputeCapMinor);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const PerfEnv& perf_env = op_stats.perf_env();
  // ridge point = 141000 GFLOP/s / 900 GiB/s ~= 156.67.
  EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
  EXPECT_NEAR(
      900,
      perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
      kMaxError);
  EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
// Two TPU planes: RunEnvironment should report the TPU type string and a
// core count of two for the single host/task.
TEST(ConvertXPlaneToOpStats, TpuRunEnvironment) {
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder device_plane1(
      GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
  XPlaneBuilder device_plane2(
      GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
      session_snapshot_or.value(), OpStatsOptions(), &op_stats));
  const RunEnvironment& run_env = op_stats.run_environment();
  EXPECT_EQ("TPU V4", run_env.device_type());
  EXPECT_EQ(1, run_env.host_count());
  EXPECT_EQ(1, run_env.task_count());
  EXPECT_EQ(2, run_env.device_core_count());
}
// A TPU device trace with a single op event on the TensorFlow-op line should
// produce a device op-metrics DB containing that op plus the synthetic IDLE
// entry.
TEST(ConvertXPlaneToOpStats, TpuDeviceTraceToStepDb) {
  auto space = std::make_unique<XSpace>();
  constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
  constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 1000.0;
  XPlaneBuilder xplane_builder(GetOrCreateTpuXPlane(
      space.get(), 0, "TPU V4",
      kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
  // One op event ("op_name") with the stats the TPU op-metrics converter
  // reads: program/symbol ids, self duration, TF op name, and HLO category.
  XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
  event_metadata->set_name("op_name");
  XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kProgramId)),
                     1);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kSymbolId)),
                     1);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kSelfDurationPs)),
                     10);
  stats.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
      "tf_op_name");
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kHloCategory)),
                     "category");
  XLineBuilder line = xplane_builder.GetOrCreateLine(1);
  line.SetName(kTensorFlowOpLineName);
  XEventBuilder event = line.AddEvent(*event_metadata);
  event.SetOffsetNs(0);
  event.SetDurationNs(10);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  EXPECT_THAT(op_stats.device_op_metrics_db().metrics_db(),
              UnorderedElementsAre(Property(&OpMetrics::name, "op_name"),
                                   Property(&OpMetrics::name, "IDLE")));
}
// Multi-TPU step combining uses set intersection: group id 1 appears on both
// cores and survives; group id 2 exists only on core 1 and is dropped, so
// exactly one step remains.
TEST(ConvertXPlaneToOpStats, TpuMultiDeviceStepDbTest) {
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder device_plane_builder1(
      GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
  XPlaneBuilder device_plane_builder2(
      GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
  device_plane_builder1.ReserveLines(1);
  device_plane_builder2.ReserveLines(1);
  XStatMetadata* kGroupId1 = device_plane_builder1.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  // Core 0: one step event with group id 1.
  XLineBuilder line = device_plane_builder1.GetOrCreateLine(1);
  line.SetName(kXlaOpLineName);
  XEventMetadata* event_metadata =
      device_plane_builder1.GetOrCreateEventMetadata(1);
  event_metadata->set_name("Step 1");
  XEventBuilder event_builder = line.AddEvent(*event_metadata);
  event_builder.AddStatValue(*kGroupId1, 1);
  event_builder.SetDurationNs(100);
  event_builder.SetOffsetNs(100);
  // Core 1: a matching group-id-1 event plus an extra group-id-2 event.
  line = device_plane_builder2.GetOrCreateLine(1);
  line.SetName(kXlaOpLineName);
  XStatMetadata* kGroupId2 = device_plane_builder2.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  XEventMetadata* event_metadata2 =
      device_plane_builder2.GetOrCreateEventMetadata(2);
  event_metadata2->set_name("Step 1");
  XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
  event_builder2.AddStatValue(*kGroupId2, 1);
  event_builder2.SetDurationNs(100);
  event_builder2.SetOffsetNs(300);
  XStatMetadata* kGroupId3 = device_plane_builder2.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  // NOTE(review): metadata id 2 is reused here, so set_name("Step 2")
  // renames the shared metadata — presumably intentional; confirm.
  XEventMetadata* event_metadata3 =
      device_plane_builder2.GetOrCreateEventMetadata(2);
  event_metadata3->set_name("Step 2");
  XEventBuilder event_builder3 = line.AddEvent(*event_metadata3);
  event_builder3.AddStatValue(*kGroupId3, 2);
  event_builder3.SetDurationNs(100);
  event_builder3.SetOffsetNs(300);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  OpStats op_stats = ConvertXSpaceToOpStats(*space, options);
  const StepDatabaseResult& step_db = op_stats.step_db();
  // Only the group common to both cores survives the intersection.
  EXPECT_EQ(step_db.step_sequence_size(), 1);
}
}
}
} | OpStats ConvertXSpaceToOpStats(const XSpace& space,
                               const OpStatsOptions& options) {
  // TPU (TensorCore) planes take precedence; fall back to GPU planes when
  // none are present. `is_tpu` selects the TPU-specific paths below.
  std::vector<const XPlane*> device_planes = FindTensorCorePlanes(space);
  bool is_tpu = !device_planes.empty();
  if (!is_tpu) {
    device_planes = FindPlanesWithPrefix(space, kGpuPlanePrefix);
  }
  OpStats op_stats;
  StepEvents step_events;
  PropagateXSpaceDiagnosticsToOpStats(space, &op_stats);
  // Merges per-device op metrics into a single device op-metrics DB.
  OpMetricsDbCombiner op_metrics_db_combiner(
      op_stats.mutable_device_op_metrics_db());
  SetRunEnvironment(space, op_stats.mutable_run_environment());
  KernelReportMap reports;
  for (const XPlane* device_trace : device_planes) {
    XPlane aggregated_xplane;
    bool use_aggregated_xplane = false;
    if (options.generate_op_metrics_db) {
      // Perf env is taken from the first device plane that provides it.
      if (!op_stats.has_perf_env()) {
        *op_stats.mutable_perf_env() = GetPerfEnvFromXPlane(*device_trace);
      }
      if (!is_tpu) {
        OpMetricsDb device_op_metrics_db =
            ConvertDeviceTraceXPlaneToOpMetricsDb(*device_trace);
        op_metrics_db_combiner.Combine(device_op_metrics_db);
      } else {
        // TPU traces are aggregated first; the aggregated plane is reused
        // for step-event extraction below.
        AggregateXPlane(*device_trace, aggregated_xplane);
        use_aggregated_xplane = true;
        OpMetricsDb device_op_metrics_db =
            ConvertTpuDeviceTraceXPlaneToOpMetricsDb(aggregated_xplane);
        op_metrics_db_combiner.Combine(device_op_metrics_db);
      }
    }
    if (options.generate_step_db) {
      StepEvents device_step_events = ConvertDeviceTraceXPlaneToStepEvents(
          use_aggregated_xplane ? aggregated_xplane : *device_trace);
      // TPU keeps only steps present on every device (intersection); GPU
      // unions the steps observed across devices.
      if (is_tpu) {
        IntersectCombineStepEvents(device_step_events, &step_events);
      } else {
        UnionCombineStepEvents(device_step_events, &step_events);
      }
    }
    if (options.generate_kernel_stats_db) {
      ConvertDeviceTraceXPlaneToKernelReports(*device_trace,
                                              {}, &reports);
    }
  }
  if (options.generate_kernel_stats_db) {
    CopyTopKDurationKernelReportsToDb(reports,
                                      op_stats.mutable_kernel_stats_db());
  }
  bool has_device = !device_planes.empty();
  const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
  if (host_plane) {
    if (options.generate_op_metrics_db) {
      *op_stats.mutable_host_op_metrics_db() =
          ConvertHostThreadsXPlaneToOpMetricsDb(*host_plane);
    }
    // Host-derived steps are only used when no device plane exists.
    if (options.generate_step_db && !has_device) {
      StepEvents host_step_events =
          ConvertHostThreadsXPlaneToStepEvents(*host_plane, nullptr);
      UnionCombineStepEvents(host_step_events, &step_events);
    }
    XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
    auto stat = visitor.GetStat(StatType::kMatrixUnitUtilizationPercent);
    if (stat.has_value()) {
      op_stats.mutable_performance_counter_result()
          ->set_matrix_unit_utilization_percent(stat->DoubleValue());
    }
  }
  if (options.generate_step_db) {
    if (is_tpu) {
      *op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
          has_device, options.maybe_drop_incomplete_steps, step_events);
      *op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
          ComputePrecisionStats(step_events);
    } else {
      // GPU step events can overlap across streams; de-overlap them before
      // building the step DB and precision stats.
      StepEvents nonoverlapped_step_events =
          ToNonOverlappedStepEvents(step_events);
      *op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
          has_device, options.maybe_drop_incomplete_steps,
          nonoverlapped_step_events);
      *op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
          ComputePrecisionStats(nonoverlapped_step_events);
    }
  }
  // Non-TPU runs record the hostname under the default GPU core id.
  if (!is_tpu) {
    CoreDetails& details =
        (*op_stats.mutable_core_id_to_details())[kDefaultGpuLocalCoreId];
    details.set_hostname(Hostname(space));
  }
  HloProtoMap hlo_proto_map;
  hlo_proto_map.AddHloProtosFromXSpace(space);
  SetProgramIdToNameMap(hlo_proto_map, op_stats);
  return op_stats;
} | TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
  // CPU-only capture: with no device planes, steps are derived purely from
  // host TraceContext/FunctionRun/ExecutorStateProcess events, and exactly
  // one step is expected.
  constexpr int64_t kStepNum = 123;
  constexpr int64_t kStepId = 0;
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
  host_plane_builder.ReserveLines(2);
  auto main_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
               0, 100, {{StatType::kStepNum, kStepNum}});
  // Producer (FunctionRun) and consumer (ExecutorStateProcess) are linked via
  // matching producer/consumer type and id stats.
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
               10, 90,
               {{StatType::kStepId, kStepId},
                {StatType::kProducerType, int64_t{1}},
                {StatType::kProducerId, kStepId}});
  auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &tf_executor_thread,
               HostEventType::kExecutorStateProcess, 20, 80,
               {{StatType::kStepId, kStepId},
                {StatType::kConsumerType, int64_t{1}},
                {StatType::kConsumerId, kStepId}});
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const StepDatabaseResult& step_db = op_stats.step_db();
  EXPECT_EQ(step_db.step_sequence_size(), 1);
}
// Host events plus one GPU stream event correlated via kCorrelationId. The
// host markers only span part of the trace, so no complete step is formed
// (step_sequence_size == 0), but the 40ns fp32 kernel still contributes to
// precision stats.
TEST(ConvertXPlaneToOpStats, GpuStepDbTest) {
  constexpr int64_t kStepNum = 123;
  constexpr int64_t kStepId = 0;
  constexpr int64_t kCorrelationId = 100;
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
  host_plane_builder.ReserveLines(2);
  auto main_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
               0, 100, {{StatType::kStepNum, kStepNum}});
  CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
               10, 90,
               {{StatType::kStepId, kStepId},
                {StatType::kProducerType, int64_t{1}},
                {StatType::kProducerId, kStepId}});
  auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &tf_executor_thread,
               HostEventType::kExecutorStateProcess, 20, 20,
               {{StatType::kStepId, kStepId},
                {StatType::kConsumerType, int64_t{1}},
                {StatType::kConsumerId, kStepId}});
  // Host-side launch; linked to the device kernel by correlation id.
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
               {{StatType::kCorrelationId, kCorrelationId}});
  XPlaneBuilder device_plane_builder(
      GetOrCreateGpuXPlane(space.get(), 0));
  device_plane_builder.ReserveLines(1);
  auto stream = device_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
               {{StatType::kCorrelationId, kCorrelationId}});
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  const StepDatabaseResult& step_db = op_stats.step_db();
  EXPECT_EQ(step_db.step_sequence_size(), 0);
  PrecisionStats precision_stats =
      op_stats.device_op_metrics_db().precision_stats();
  EXPECT_EQ(precision_stats.compute_16bit_ps(), 0);
  EXPECT_EQ(precision_stats.compute_32bit_ps(), 40);
}
// The same error recorded twice in the XSpace must surface exactly once in
// the OpStats diagnostics.
TEST(ConvertXPlaneToOpStats, PropagateAndDedupErrors) {
  static constexpr char kError[] = "host: error";
  XSpace xspace;
  for (int i = 0; i < 2; ++i) {
    *xspace.add_errors() = kError;
  }
  OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
  const auto& diagnostics = op_stats.diagnostics();
  EXPECT_EQ(1, diagnostics.errors_size());
  EXPECT_EQ(kError, diagnostics.errors(0));
}
// The XSpace hostname must be copied into the default GPU core's details.
TEST(ConvertXPlaneToOpStats, Hostnames) {
  XSpace space;
  static constexpr char kHost[] = "host1";
  *space.add_hostnames() = kHost;
  OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
  EXPECT_EQ(
      kHost,
      op_stats.core_id_to_details().at(kDefaultGpuLocalCoreId).hostname());
}
// Four TPU planes in one XSpace must yield device_type "TPU V4" and a device
// core count of 4 in the run environment.
TEST(ConvertXPlaneToOpStats, RunEnvironmentExtractedFromTpuPlane) {
  XSpace xspace;
  for (int i : {0, 1, 2, 3}) {
    GetOrCreateTpuXPlane(&xspace, i, "TPU V4", 0, 0);
  }
  OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
  EXPECT_EQ(op_stats.run_environment().device_type(), "TPU V4");
  EXPECT_EQ(op_stats.run_environment().device_core_count(), 4);
}
// NOTE(review): duplicate of the TpuDeviceTraceToStepDb test above — two
// TESTs with the same suite/name pair will not link in one binary; presumably
// these came from different files. Verifies op_name + IDLE in the device DB.
TEST(ConvertXPlaneToOpStats, TpuDeviceTraceToStepDb) {
  auto space = std::make_unique<XSpace>();
  constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
  constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 1000.0;
  XPlaneBuilder xplane_builder(GetOrCreateTpuXPlane(
      space.get(), 0, "TPU V4",
      kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
  XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
  event_metadata->set_name("op_name");
  XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kProgramId)),
                     1);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kSymbolId)),
                     1);
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kSelfDurationPs)),
                     10);
  stats.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
      "tf_op_name");
  stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                         GetStatTypeStr(StatType::kHloCategory)),
                     "category");
  XLineBuilder line = xplane_builder.GetOrCreateLine(1);
  line.SetName(kTensorFlowOpLineName);
  XEventBuilder event = line.AddEvent(*event_metadata);
  event.SetOffsetNs(0);
  event.SetDurationNs(10);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  std::vector<std::unique_ptr<XSpace>> xspaces;
  xspaces.push_back(std::move(space));
  auto session_snapshot_or =
      SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
  TF_CHECK_OK(session_snapshot_or.status());
  OpStats op_stats;
  TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
                                                   options, &op_stats));
  EXPECT_THAT(op_stats.device_op_metrics_db().metrics_db(),
              UnorderedElementsAre(Property(&OpMetrics::name, "op_name"),
                                   Property(&OpMetrics::name, "IDLE")));
}
// Duplicate of TpuMultiDeviceStepDbTest: steps from two TPU planes are
// intersected, so only group 1 (present on both devices) survives.
TEST(ConvertXPlaneToOpStats, TpuMultiDeviceStepDbTest) {
  auto space = std::make_unique<XSpace>();
  XPlaneBuilder device_plane_builder1(
      GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
  XPlaneBuilder device_plane_builder2(
      GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
  device_plane_builder1.ReserveLines(1);
  device_plane_builder2.ReserveLines(1);
  XStatMetadata* kGroupId1 = device_plane_builder1.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  XLineBuilder line = device_plane_builder1.GetOrCreateLine(1);
  line.SetName(kXlaOpLineName);
  XEventMetadata* event_metadata =
      device_plane_builder1.GetOrCreateEventMetadata(1);
  event_metadata->set_name("Step 1");
  XEventBuilder event_builder = line.AddEvent(*event_metadata);
  event_builder.AddStatValue(*kGroupId1, 1);
  event_builder.SetDurationNs(100);
  event_builder.SetOffsetNs(100);
  line = device_plane_builder2.GetOrCreateLine(1);
  line.SetName(kXlaOpLineName);
  XStatMetadata* kGroupId2 = device_plane_builder2.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  XEventMetadata* event_metadata2 =
      device_plane_builder2.GetOrCreateEventMetadata(2);
  event_metadata2->set_name("Step 1");
  XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
  event_builder2.AddStatValue(*kGroupId2, 1);
  event_builder2.SetDurationNs(100);
  event_builder2.SetOffsetNs(300);
  XStatMetadata* kGroupId3 = device_plane_builder2.GetOrCreateStatMetadata(
      GetStatTypeStr(StatType::kGroupId));
  // NOTE(review): reusing event-metadata id 2 aliases event_metadata2, so
  // set_name below renames the earlier "Step 1" event too — presumably id 3
  // was intended; grouping is by kGroupId, so the expectation still holds.
  XEventMetadata* event_metadata3 =
      device_plane_builder2.GetOrCreateEventMetadata(2);
  event_metadata3->set_name("Step 2");
  XEventBuilder event_builder3 = line.AddEvent(*event_metadata3);
  event_builder3.AddStatValue(*kGroupId3, 2);
  event_builder3.SetDurationNs(100);
  event_builder3.SetOffsetNs(300);
  OpStatsOptions options;
  options.generate_op_metrics_db = true;
  options.generate_step_db = true;
  OpStats op_stats = ConvertXSpaceToOpStats(*space, options);
  const StepDatabaseResult& step_db = op_stats.step_db();
  EXPECT_EQ(step_db.step_sequence_size(), 1);
} |
#include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <utility>
#include "absl/debugging/leak_check.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace profiler {
using tsl::mutex_lock;
// Takes ownership of the wrapped CUPTI interface and starts enabled
// (disabled_ == 0); a later CUPTI failure flips disabled_ and turns all
// forwarded calls into no-ops.
CuptiErrorManager::CuptiErrorManager(std::unique_ptr<CuptiInterface> interface)
    : interface_(std::move(interface)), disabled_(0), undo_disabled_(false) {}
// Guard: once a CUPTI failure has disabled the manager (disabled_ != 0), a
// wrapped call short-circuits with CUPTI_ERROR_DISABLED instead of touching
// the (possibly broken) CUPTI runtime.
#define IGNORE_CALL_IF_DISABLED                                                \
  if (disabled_) {                                                             \
    LOG(ERROR) << "cupti" << __func__ << ": ignored due to a previous error."; \
    return CUPTI_ERROR_DISABLED;                                               \
  }                                                                            \
  VLOG(1) << "cupti" << __func__;
// Treats one specific CUPTI status as benign: log it at VLOG(1) and return
// without disabling the manager.
#define ALLOW_ERROR(e, ERROR)                                                  \
  if (e == ERROR) {                                                            \
    VLOG(1) << "cupti" << __func__ << ": error " << static_cast<int>(e)        \
            << ": " << ResultString(e) << " (allowed)";                        \
    return e;                                                                  \
  }
// On any non-success status: log it, run the registered undo functions, and
// permanently disable this manager.
#define LOG_AND_DISABLE_IF_ERROR(e)                                            \
  if (e != CUPTI_SUCCESS) {                                                    \
    LOG(ERROR) << "cupti" << __func__ << ": error " << static_cast<int>(e)     \
               << ": " << ResultString(e);                                     \
    UndoAndDisable();                                                          \
  }
// Records `func` so UndoAndDisable() can roll back a successful CUPTI call
// if a later call fails. The undo stack is guarded by undo_stack_mu_.
void CuptiErrorManager::RegisterUndoFunction(
    const CuptiErrorManager::UndoFunction& func) {
  mutex_lock guard(undo_stack_mu_);
  undo_stack_.emplace_back(func);
}
// Forwards cuptiActivityDisable; on failure, rolls back prior successful
// calls and disables this manager.
CUptiResult CuptiErrorManager::ActivityDisable(CUpti_ActivityKind kind) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->ActivityDisable(kind);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityEnable. On success, registers the matching
// ActivityDisable as an undo action so a later failure can roll it back.
CUptiResult CuptiErrorManager::ActivityEnable(CUpti_ActivityKind kind) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->ActivityEnable(kind);
  if (error == CUPTI_SUCCESS) {
    auto f = std::bind(&CuptiErrorManager::ActivityDisable, this, kind);
    RegisterUndoFunction(f);
  }
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityFlushAll. Unlike the other wrappers this one has no
// IGNORE_CALL_IF_DISABLED guard — presumably intentional so buffered records
// can still be drained after the manager is disabled; confirm before adding.
CUptiResult CuptiErrorManager::ActivityFlushAll(uint32_t flag) {
  CUptiResult error = interface_->ActivityFlushAll(flag);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityGetNextRecord. CUPTI_ERROR_MAX_LIMIT_REACHED marks
// the normal end of a buffer, so it is returned without disabling.
CUptiResult CuptiErrorManager::ActivityGetNextRecord(
    uint8_t* buffer, size_t valid_buffer_size_bytes, CUpti_Activity** record) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->ActivityGetNextRecord(
      buffer, valid_buffer_size_bytes, record);
  ALLOW_ERROR(error, CUPTI_ERROR_MAX_LIMIT_REACHED);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityGetNumDroppedRecords; disables on failure.
CUptiResult CuptiErrorManager::ActivityGetNumDroppedRecords(CUcontext context,
                                                            uint32_t stream_id,
                                                            size_t* dropped) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error =
      interface_->ActivityGetNumDroppedRecords(context, stream_id, dropped);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityConfigureUnifiedMemoryCounter. Errors are returned
// to the caller but deliberately do NOT disable the manager (no
// LOG_AND_DISABLE_IF_ERROR here).
CUptiResult CuptiErrorManager::ActivityConfigureUnifiedMemoryCounter(
    CUpti_ActivityUnifiedMemoryCounterConfig* config, uint32_t count) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error =
      interface_->ActivityConfigureUnifiedMemoryCounter(config, count);
  return error;
}
// Forwards cuptiActivityRegisterCallbacks. The leak-check disabler covers
// allocations CUPTI makes internally and never frees.
CUptiResult CuptiErrorManager::ActivityRegisterCallbacks(
    CUpti_BuffersCallbackRequestFunc func_buffer_requested,
    CUpti_BuffersCallbackCompleteFunc func_buffer_completed) {
  IGNORE_CALL_IF_DISABLED;
  absl::LeakCheckDisabler disabler;
  CUptiResult error = interface_->ActivityRegisterCallbacks(
      func_buffer_requested, func_buffer_completed);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiActivityUsePerThreadBuffer. Errors are returned but do not
// disable the manager (no LOG_AND_DISABLE_IF_ERROR).
CUptiResult CuptiErrorManager::ActivityUsePerThreadBuffer() {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->ActivityUsePerThreadBuffer();
  return error;
}
// Forwards cuptiGetDeviceId; disables on failure.
CUptiResult CuptiErrorManager::GetDeviceId(CUcontext context,
                                           uint32_t* device_id) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->GetDeviceId(context, device_id);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiGetTimestamp; disables on failure.
CUptiResult CuptiErrorManager::GetTimestamp(uint64_t* timestamp) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->GetTimestamp(timestamp);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiFinalize. CUPTI_ERROR_API_NOT_IMPLEMENTED is tolerated
// (some CUPTI builds do not implement finalize); other errors disable.
CUptiResult CuptiErrorManager::Finalize() {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->Finalize();
  ALLOW_ERROR(error, CUPTI_ERROR_API_NOT_IMPLEMENTED);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiEnableCallback. When enabling succeeds, the matching disable
// call (enable == 0) is registered as an undo action.
CUptiResult CuptiErrorManager::EnableCallback(uint32_t enable,
                                              CUpti_SubscriberHandle subscriber,
                                              CUpti_CallbackDomain domain,
                                              CUpti_CallbackId callback_id) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error =
      interface_->EnableCallback(enable, subscriber, domain, callback_id);
  if (error == CUPTI_SUCCESS) {
    if (enable == 1) {
      auto f = std::bind(&CuptiErrorManager::EnableCallback, this,
                         0 , subscriber, domain, callback_id);
      RegisterUndoFunction(f);
    }
  }
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiEnableDomain. When enabling succeeds, the matching disable
// call (enable == 0) is registered as an undo action.
CUptiResult CuptiErrorManager::EnableDomain(uint32_t enable,
                                            CUpti_SubscriberHandle subscriber,
                                            CUpti_CallbackDomain domain) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->EnableDomain(enable, subscriber, domain);
  if (error == CUPTI_SUCCESS) {
    if (enable == 1) {
      auto f = std::bind(&CuptiErrorManager::EnableDomain, this,
                         0 , subscriber, domain);
      RegisterUndoFunction(f);
    }
  }
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiSubscribe. On success, Unsubscribe on the new handle is
// registered as an undo action. The leak-check disabler covers CUPTI's
// internal allocations.
CUptiResult CuptiErrorManager::Subscribe(CUpti_SubscriberHandle* subscriber,
                                         CUpti_CallbackFunc callback,
                                         void* userdata) {
  IGNORE_CALL_IF_DISABLED;
  absl::LeakCheckDisabler disabler;
  CUptiResult error = interface_->Subscribe(subscriber, callback, userdata);
  if (error == CUPTI_SUCCESS) {
    auto f = std::bind(&CuptiErrorManager::Unsubscribe, this, *subscriber);
    RegisterUndoFunction(f);
  }
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiUnsubscribe; disables on failure.
CUptiResult CuptiErrorManager::Unsubscribe(CUpti_SubscriberHandle subscriber) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->Unsubscribe(subscriber);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Runs every registered undo function (most recent first), then marks the
// manager disabled so all subsequently wrapped CUPTI calls become no-ops.
void CuptiErrorManager::UndoAndDisable() {
  // Re-entrancy guard: an undo function may itself fail and call back into
  // this method. NOTE(review): checked before the mutex is taken — confirm
  // callers' threading assumptions.
  if (undo_disabled_) {
    return;
  }
  mutex_lock lock(undo_stack_mu_);
  undo_disabled_ = true;  // Suppress nested undo execution.
  while (!undo_stack_.empty()) {
    LOG(ERROR) << "CuptiErrorManager is disabling profiling automatically.";
    undo_stack_.back()();
    undo_stack_.pop_back();
  }
  undo_disabled_ = false;
  disabled_ = 1;  // From now on, wrapped calls return CUPTI_ERROR_DISABLED.
}
// Forwards cuptiGetResultString; disables on failure.
CUptiResult CuptiErrorManager::GetResultString(CUptiResult result,
                                               const char** str) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->GetResultString(result, str);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiGetContextId; disables on failure.
CUptiResult CuptiErrorManager::GetContextId(CUcontext context,
                                            uint32_t* context_id) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error = interface_->GetContextId(context, context_id);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Forwards cuptiGetStreamIdEx; disables on failure.
CUptiResult CuptiErrorManager::GetStreamIdEx(CUcontext context, CUstream stream,
                                             uint8_t per_thread_stream,
                                             uint32_t* stream_id) {
  IGNORE_CALL_IF_DISABLED;
  CUptiResult error =
      interface_->GetStreamIdEx(context, stream, per_thread_stream, stream_id);
  LOG_AND_DISABLE_IF_ERROR(error);
  return error;
}
// Discards all registered undo functions without executing them; used when
// the recorded rollbacks are no longer needed.
void CuptiErrorManager::CleanUp() {
  if (undo_disabled_) {  // An undo pass is already in progress elsewhere.
    return;
  }
  mutex_lock guard(undo_stack_mu_);
  undo_disabled_ = true;
  undo_stack_.clear();  // Drop, do not run, the undo functions.
  undo_disabled_ = false;
}
// Converts a CUPTI status to its human-readable string; returns an empty
// string when CUPTI cannot describe the status.
std::string CuptiErrorManager::ResultString(CUptiResult error) const {
  const char* error_message = nullptr;
  if (interface_->GetResultString(error, &error_message) == CUPTI_SUCCESS &&
      error_message != nullptr) {
    return error_message;
  }
  return "";
}
}
} | #if GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "xla/backends/profiler/gpu/cuda_test.h"
#include "xla/backends/profiler/gpu/cupti_interface.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#include "xla/backends/profiler/gpu/cupti_wrapper.h"
#include "xla/backends/profiler/gpu/mock_cupti.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/utils/time_utils.h"
namespace xla {
namespace profiler {
namespace test {
using xla::profiler::CuptiInterface;
using xla::profiler::CuptiTracer;
using xla::profiler::CuptiTracerCollectorOptions;
using xla::profiler::CuptiTracerOptions;
using xla::profiler::CuptiWrapper;
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::Sequence;
using ::testing::StrictMock;
// Exposes CuptiTracer's protected constructor so tests can inject a mocked
// CuptiInterface.
class TestableCuptiTracer : public CuptiTracer {
 public:
  explicit TestableCuptiTracer(CuptiInterface* cupti_interface)
      : CuptiTracer(cupti_interface) {}
};
// Fixture wiring a StrictMock<MockCupti> behind a CuptiErrorManager behind a
// CuptiTracer, so tests can inject CUPTI failures and observe the manager
// auto-disabling profiling.
class CuptiErrorManagerTest : public ::testing::Test {
 protected:
  CuptiErrorManagerTest() {}
  void SetUp() override {
    ASSERT_GT(CuptiTracer::NumGpus(), 0) << "No devices found";
    auto mock_cupti = std::make_unique<StrictMock<MockCupti>>();
    // Keep a raw observer; ownership moves into the error manager.
    mock_ = mock_cupti.get();
    cupti_error_manager_ =
        std::make_unique<CuptiErrorManager>(std::move(mock_cupti));
    cupti_tracer_ =
        std::make_unique<TestableCuptiTracer>(cupti_error_manager_.get());
    // Real CUPTI wrapper used as the delegate for mocked calls.
    cupti_wrapper_ = std::make_unique<CuptiWrapper>();
    CuptiTracerCollectorOptions collector_options;
    collector_options.num_gpus = CuptiTracer::NumGpus();
    uint64_t start_gputime_ns = CuptiTracer::GetTimestamp();
    uint64_t start_walltime_ns = tsl::profiler::GetCurrentTimeNanos();
    cupti_collector_ = CreateCuptiCollector(
        collector_options, start_walltime_ns, start_gputime_ns);
  }
  void EnableProfiling(const CuptiTracerOptions& option) {
    cupti_tracer_->Enable(option, cupti_collector_.get());
  }
  void DisableProfiling() { cupti_tracer_->Disable(); }
  bool CuptiDisabled() const { return cupti_error_manager_->Disabled(); }
  // Small GPU workload used to confirm that a disabled manager ignores it.
  void RunGpuApp() {
    MemCopyH2D();
    PrintfKernel(10);
    Synchronize();
    MemCopyD2H();
  }
  StrictMock<MockCupti>* mock_;  // Non-owning; owned by cupti_error_manager_.
  std::unique_ptr<TestableCuptiTracer> cupti_tracer_ = nullptr;
  std::unique_ptr<CuptiInterface> cupti_error_manager_;
  std::unique_ptr<CuptiWrapper> cupti_wrapper_;
  std::unique_ptr<xla::profiler::CuptiTraceCollector> cupti_collector_;
};
// ActivityEnable fails with CUPTI_ERROR_UNKNOWN; the manager must undo the
// earlier successful EnableCallback/Subscribe (in reverse order) and stay
// disabled for the rest of the session.
TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) {
  Sequence s1;
  EXPECT_CALL(*mock_, Subscribe(_, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
  EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
  EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityUsePerThreadBuffer));
  EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityRegisterCallbacks));
  // Injected failure that must trigger the rollback below.
  EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL))
      .InSequence(s1)
      .WillOnce(Return(CUPTI_ERROR_UNKNOWN));
  EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
  // Rollback: disable the callback, then unsubscribe.
  EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
  EXPECT_CALL(*mock_, Unsubscribe(_))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
  EXPECT_FALSE(CuptiDisabled());
  CuptiTracerOptions options;
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
  options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
  EnableProfiling(options);
  EXPECT_TRUE(CuptiDisabled());
  RunGpuApp();
  EXPECT_TRUE(CuptiDisabled());
  DisableProfiling();
  EXPECT_TRUE(CuptiDisabled());
}
// The second ActivityEnable (MEMCPY2) fails; the manager must undo the first
// successful ActivityEnable (MEMCPY), the domain enable, and the subscribe —
// in reverse order — and remain disabled afterwards.
TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) {
  EXPECT_FALSE(CuptiDisabled());
  Sequence s1;
  EXPECT_CALL(*mock_, Subscribe(_, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
  EXPECT_CALL(*mock_, EnableDomain(1, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
  EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityUsePerThreadBuffer));
  EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityRegisterCallbacks));
  EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable));
  // Injected failure that must trigger the rollback below.
  EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2))
      .InSequence(s1)
      .WillOnce(Return(CUPTI_ERROR_UNKNOWN));
  EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
  // Rollback in reverse registration order.
  EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable));
  EXPECT_CALL(*mock_, EnableDomain(0, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
  EXPECT_CALL(*mock_, Unsubscribe(_))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
  EXPECT_FALSE(CuptiDisabled());
  CuptiTracerOptions options;
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY);
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2);
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
  EnableProfiling(options);
  EXPECT_TRUE(CuptiDisabled());
  RunGpuApp();
  EXPECT_TRUE(CuptiDisabled());
  DisableProfiling();
  EXPECT_TRUE(CuptiDisabled());
}
}
}
}
#endif | CUptiResult CuptiErrorManager::ActivityUsePerThreadBuffer() {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityUsePerThreadBuffer();
return error;
} | TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) {
Sequence s1;
EXPECT_CALL(*mock_, Subscribe(_, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityUsePerThreadBuffer));
EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityRegisterCallbacks));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL))
.InSequence(s1)
.WillOnce(Return(CUPTI_ERROR_UNKNOWN));
EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, Unsubscribe(_))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
EXPECT_FALSE(CuptiDisabled());
CuptiTracerOptions options;
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
EnableProfiling(options);
EXPECT_TRUE(CuptiDisabled());
RunGpuApp();
EXPECT_TRUE(CuptiDisabled());
DisableProfiling();
EXPECT_TRUE(CuptiDisabled());
}
// NOTE(review): duplicate of GpuTraceAutoEnableTest above — presumably from
// a second copy of the same file; two TEST_Fs with identical names cannot
// coexist in one binary. Same scenario: MEMCPY2 enable fails, rollback runs.
TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) {
  EXPECT_FALSE(CuptiDisabled());
  Sequence s1;
  EXPECT_CALL(*mock_, Subscribe(_, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
  EXPECT_CALL(*mock_, EnableDomain(1, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
  EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityUsePerThreadBuffer));
  EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(),
                       &CuptiWrapper::ActivityRegisterCallbacks));
  EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable));
  EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2))
      .InSequence(s1)
      .WillOnce(Return(CUPTI_ERROR_UNKNOWN));
  EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
  EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable));
  EXPECT_CALL(*mock_, EnableDomain(0, _, _))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
  EXPECT_CALL(*mock_, Unsubscribe(_))
      .InSequence(s1)
      .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
  EXPECT_FALSE(CuptiDisabled());
  CuptiTracerOptions options;
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY);
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2);
  options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
  EnableProfiling(options);
  EXPECT_TRUE(CuptiDisabled());
  RunGpuApp();
  EXPECT_TRUE(CuptiDisabled());
  DisableProfiling();
  EXPECT_TRUE(CuptiDisabled());
}
#include "xla/service/spmd/partition_assignment.h"
#include <cstdint>
#include <memory>
namespace xla {
// Constructs a partitioning algorithm of the given kind targeting
// `num_partitions` shards. Fewer than two partitions makes partitioning
// meaningless, so such values are rejected with a fatal CHECK.
//
// Uses a member-initializer list instead of in-body assignment (idiomatic
// and avoids default-construct-then-assign).
PartitioningAlgorithm::PartitioningAlgorithm(AlgorithmKind kind,
                                             int64_t num_partitions)
    : kind_(kind), num_partitions_(num_partitions) {
  CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
}
// Returns the human-readable name of this algorithm. Only the no-op
// algorithm exists today, so every kind (including unknown ones, which the
// original switch routed through `default`) maps to "Noop".
absl::string_view PartitioningAlgorithm::name() const {
  if (kind_ == AlgorithmKind::kNoop) {
    return "Noop";
  }
  return "Noop";
}
// Returns the kind of this partitioning algorithm.
const PartitioningAlgorithm::AlgorithmKind& PartitioningAlgorithm::kind()
    const {
  return kind_;
}
// Returns the number of partitions this algorithm targets.
int64_t PartitioningAlgorithm::num_partitions() const {
  return num_partitions_;
}
// Factory for the no-op partitioning algorithm.
std::unique_ptr<PartitioningAlgorithm>
PartitioningAlgorithm::CreateNoopPartitioning(int64_t num_partitions) {
  return std::make_unique<NoopPartitioning>(num_partitions);
}
// A no-op algorithm: it records the partition count but never modifies HLO.
NoopPartitioning::NoopPartitioning(int64_t num_partitions)
    : PartitioningAlgorithm(AlgorithmKind::kNoop, num_partitions) {
  VLOG(2) << "Created a no-op algorithm with the number of partitions: "
          << num_partitions;
}
// Leaves the module untouched and reports no change.
absl::StatusOr<bool> NoopPartitioning::Run(HloModule* module) const {
  VLOG(2) << "No-op algorithm was called to partition module: "
          << module->name();
  return false;
}
// Creates the pass targeting `num_partitions` shards; fewer than two is a
// fatal error. Uses a member-initializer list for consistency with the
// PartitioningAlgorithm constructor.
PartitionAssignment::PartitionAssignment(int64_t num_partitions)
    : num_partitions_(num_partitions) {
  CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
}
// HLO pass name, as reported in pass pipelines and debug output.
absl::string_view PartitionAssignment::name() const {
  return "partitioning-assignment";
}
// Returns the algorithm chosen by the last Run(); null before the first Run.
const PartitioningAlgorithm* PartitionAssignment::algorithm() {
  return algorithm_.get();
}
// Returns the requested number of partitions.
int64_t PartitionAssignment::num_partitions() const { return num_partitions_; }
// Selects the algorithm based on the module's debug options. Only the no-op
// algorithm is supported today; any other setting is a fatal CHECK failure.
std::unique_ptr<PartitioningAlgorithm>
PartitionAssignment::ChoosePartitioningAlgorithm(
    const HloModule& module) const {
  auto algo = module.config().debug_options().xla_partitioning_algorithm();
  CHECK_EQ(algo, DebugOptions::PARTITIONING_ALGORITHM_NOOP);
  return PartitioningAlgorithm::CreateNoopPartitioning(num_partitions());
}
// Runs partition assignment: chooses a partitioning algorithm from the
// module's debug options, stores it in `algorithm_`, and delegates to it.
// Returns whether the module was changed.
// NOTE(review): `execution_threads` is currently unused here — confirm
// whether the chosen algorithms should honor it.
absl::StatusOr<bool> PartitionAssignment::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(2) << "Running partition assignment on module " << module->name();
  algorithm_ = ChoosePartitioningAlgorithm(*module);
  return algorithm()->Run(module);
}
} | #include "xla/service/spmd/partition_assignment.h"
#include <memory>
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace {
using PartitionAssignmentTest = HloTestBase;
// Verifies that PartitionAssignment selects the no-op algorithm, leaves the
// module unchanged, and exposes the chosen algorithm only after Run().
TEST_F(PartitionAssignmentTest, NoopAlg) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[16,16]{1,0} parameter(0)
  ROOT %copy = f32[16,16]{1,0} copy(%param0)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // NOTE(review): `debug_options` is configured here but never attached to
  // the module — confirm whether it should be set on the module's config.
  DebugOptions debug_options = GetDebugOptionsForTest();
  debug_options.set_xla_partitioning_algorithm(
      DebugOptions::PARTITIONING_ALGORITHM_NOOP);
  PartitionAssignment partition_assignment(16);
  // No algorithm has been chosen before Run().
  EXPECT_EQ(partition_assignment.algorithm(), nullptr);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, partition_assignment.Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_NE(partition_assignment.algorithm(), nullptr);
  EXPECT_EQ(partition_assignment.algorithm()->kind(),
            PartitioningAlgorithm::AlgorithmKind::kNoop);
}
}
} | const PartitioningAlgorithm::AlgorithmKind& PartitioningAlgorithm::kind()
const {
return kind_;
} | TEST_F(PartitionAssignmentTest, NoopAlg) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,16]{1,0} parameter(0)
ROOT %copy = f32[16,16]{1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_partitioning_algorithm(
DebugOptions::PARTITIONING_ALGORITHM_NOOP);
PartitionAssignment partition_assignment(16);
EXPECT_EQ(partition_assignment.algorithm(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool changed, partition_assignment.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_NE(partition_assignment.algorithm(), nullptr);
EXPECT_EQ(partition_assignment.algorithm()->kind(),
PartitioningAlgorithm::AlgorithmKind::kNoop);
} |
#include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
// Builds a GrapplerFunctionItem (a GrapplerItem specialized for function
// optimization) from a fully-instantiated function body graph.
//
// Takes ownership of `function_body`, stamps it with `graph_def_version`,
// and derives the item's feeds (one per input arg), fetches (one per output
// arg) and keep-ops (one per control output).
GrapplerFunctionItem::GrapplerFunctionItem(
    string func_name, string description, AttrSlice func_attr,
    std::vector<const FunctionDef::ArgAttrs*> arg_attr,
    std::vector<InputArgInstantiation> input_args,
    std::vector<OutputArgInstantiation> output_args,
    std::vector<ControlOutput> control_outputs, const int graph_def_version,
    const bool is_stateful, GraphDef&& function_body)
    : description_(std::move(description)),
      func_attr_(func_attr),
      arg_attr_(std::move(arg_attr)),
      input_args_(std::move(input_args)),
      output_args_(std::move(output_args)),
      control_outputs_(std::move(control_outputs)),
      is_stateful_(is_stateful) {
  id = std::move(func_name);
  graph = std::move(function_body);
  graph.mutable_versions()->set_producer(graph_def_version);
  // Populate feed/fetch/keep_ops so generic GrapplerItem machinery treats
  // function arguments, returns, and control outputs correctly.
  for (const InputArgInstantiation& input_arg : input_args_) {
    feed.push_back({input_arg.node_name, Tensor()});
  }
  for (const OutputArgInstantiation& output_arg : output_args_) {
    fetch.push_back(output_arg.node_name);
  }
  for (const ControlOutput& control_output : control_outputs_) {
    keep_ops.push_back(control_output.node_name);
  }
  // Stateful and dataset ops inside a function body must never be pruned.
  optimization_options().allow_pruning_stateful_and_dataset_ops = false;
}
// Trivial accessors for the instantiated function's metadata.
const string& GrapplerFunctionItem::description() const { return description_; }
const std::vector<InputArgInstantiation>& GrapplerFunctionItem::inputs() const {
  return input_args_;
}
const InputArgInstantiation& GrapplerFunctionItem::input(int i) const {
  return input_args_[i];
}
const std::size_t GrapplerFunctionItem::input_size() const {
  return input_args_.size();
}
const std::vector<OutputArgInstantiation>& GrapplerFunctionItem::outputs()
    const {
  return output_args_;
}
const OutputArgInstantiation& GrapplerFunctionItem::output(int i) const {
  return output_args_[i];
}
const std::size_t GrapplerFunctionItem::output_size() const {
  return output_args_.size();
}
const std::vector<ControlOutput>& GrapplerFunctionItem::control_outputs()
    const {
  return control_outputs_;
}
const std::size_t GrapplerFunctionItem::control_output_size() const {
  return control_outputs_.size();
}
const AttrSlice& GrapplerFunctionItem::func_attr() const { return func_attr_; }
const std::vector<const FunctionDef::ArgAttrs*>&
GrapplerFunctionItem::arg_attr() const {
  return arg_attr_;
}
const GraphDef& GrapplerFunctionItem::function_body() const { return graph; }
GraphDef& GrapplerFunctionItem::mutable_function_body() { return graph; }
bool GrapplerFunctionItem::is_stateful() const { return is_stateful_; }
// Replaces the function body graph with `other` (moved in); returns *this
// to allow chaining.
GrapplerFunctionItem& GrapplerFunctionItem::SwapFunctionBody(GraphDef&& other) {
  graph = std::move(other);
  return *this;
}
// Returns true if any input or output argument of the function's signature
// has a type that depends on an instantiation attribute (type attr, number
// attr, or type-list attr).
bool HasParametrizedType(const FunctionDef& func) {
  const auto arg_is_parametrized = [](const OpDef::ArgDef& arg) {
    return !arg.type_attr().empty() || !arg.number_attr().empty() ||
           !arg.type_list_attr().empty();
  };
  const auto& in_args = func.signature().input_arg();
  if (std::any_of(in_args.begin(), in_args.end(), arg_is_parametrized)) {
    return true;
  }
  const auto& out_args = func.signature().output_arg();
  return std::any_of(out_args.begin(), out_args.end(), arg_is_parametrized);
}
bool HasParametrizedBody(const FunctionDef& func) {
const auto is_parametrized = [&](const NodeDef& node) {
for (const auto& attr : node.attr()) {
if (!attr.second.placeholder().empty()) return true;
}
return false;
};
return std::any_of(func.node_def().begin(), func.node_def().end(),
is_parametrized);
}
// True if the function has a parametrized signature (type attributes) or a
// parametrized body (attribute placeholders).
bool IsParametrized(const FunctionDef& func) {
  return HasParametrizedType(func) || HasParametrizedBody(func);
}
// Resolves all type parameters of `func`'s signature using the attributes
// provided at instantiation time.
//
// On success, `type_parameters` maps each type attribute name (and
// "<list_attr>:<i>" for each element of a type-list attribute) to its
// concrete DataType. The output map must be passed in empty.
Status InstantiationTypeParameters(
    const FunctionDef& func, const AttrSlice& func_instantiation_attr,
    absl::flat_hash_map<string, DataType>* type_parameters) {
  if (!type_parameters->empty()) {
    return absl::InvalidArgumentError(
        "Type parameters output map must be empty");
  }
  const auto resolve_type_attr = [&](const OpDef::ArgDef& arg) -> Status {
    if (!arg.type_attr().empty()) {
      // Single type parameter, e.g. "T".
      DataType dtype;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(func_instantiation_attr, arg.type_attr(), &dtype));
      type_parameters->emplace(arg.type_attr(), dtype);
    } else if (!arg.type_list_attr().empty()) {
      // List of types, keyed as "<attr_name>:<index>".
      std::vector<DataType> dtypes;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(func_instantiation_attr, arg.type_list_attr(), &dtypes));
      int index = 0;
      for (const DataType& dtype : dtypes) {
        type_parameters->emplace(absl::StrCat(arg.type_list_attr(), ":", index),
                                 dtype);
        ++index;
      }
    }
    return absl::OkStatus();
  };
  for (const auto& input : func.signature().input_arg())
    TF_RETURN_IF_ERROR(resolve_type_attr(input));
  for (const auto& output : func.signature().output_arg())
    TF_RETURN_IF_ERROR(resolve_type_attr(output));
  return absl::OkStatus();
}
// Resolves all attribute placeholders in `func`'s body nodes using the
// attributes provided at instantiation time.
//
// On success, `body_parameters` maps each placeholder name to its concrete
// AttrValue. The output map must be passed in empty; fails if any
// placeholder has no matching instantiation attribute.
Status InstantiationBodyParameters(
    const FunctionDef& func, const AttrSlice& func_instantiation_attr,
    absl::flat_hash_map<string, AttrValue>* body_parameters) {
  if (!body_parameters->empty()) {
    return absl::InvalidArgumentError(
        "Body parameters output map must be empty");
  }
  for (const NodeDef& func_body_node : func.node_def()) {
    for (auto& attr : func_body_node.attr()) {
      const string& placeholder = attr.second.placeholder();
      // Skip non-placeholder attrs and placeholders already resolved.
      if (placeholder.empty() || body_parameters->contains(placeholder)) {
        continue;
      }
      const AttrValue* placeholder_value =
          func_instantiation_attr.Find(placeholder);
      if (placeholder_value) {
        body_parameters->insert({placeholder, *placeholder_value});
      } else {
        return absl::InvalidArgumentError(
            absl::StrCat("Can't resolve placeholder: ", placeholder));
      }
    }
  }
  return absl::OkStatus();
}
// Converts a FunctionDef into a GrapplerFunctionItem.
//
// Instantiates the function with `func_instantiation_attr` (which must
// resolve all type parameters and placeholders), converts the resulting
// function body into a GraphDef, and collects input/output/control-output
// instantiations. Only "type" signature attributes are supported.
Status MakeGrapplerFunctionItem(const FunctionDef& func,
                                const AttrSlice& func_instantiation_attr,
                                const FunctionLibraryDefinition& flib,
                                const int graph_def_version,
                                GrapplerFunctionItem* item) {
  const OpDef& signature = func.signature();
  if (signature.name().empty()) {
    return absl::InvalidArgumentError("Function name must be specified");
  }
  for (const OpDef::AttrDef& attr : signature.attr()) {
    if (attr.type() != "type") {
      return absl::InvalidArgumentError(
          "Function signature must have only type attributes");
    }
  }
  // Instantiate the function into an executable FunctionBody graph.
  std::unique_ptr<FunctionBody> fbody;
  TF_RETURN_IF_ERROR(
      FunctionDefToBodyHelper(func, func_instantiation_attr, &flib, &fbody));
  GraphDef function_body;
  fbody->graph->ToGraphDef(&function_body);
  // Attach only the functions reachable from `func`, not the whole library.
  *function_body.mutable_library() = flib.ReachableDefinitions(func).ToProto();
  VLOG(3) << absl::Substitute(
      "Deleted $0 unreachable functions from the Grappler function item "
      "instantiation of $1 (library size = $2)",
      flib.num_functions() - function_body.library().function_size(),
      signature.name(), function_body.library().function_size());
  const int num_instantiated_inputs = fbody->arg_types.size();
  const int num_instantiated_outputs = fbody->ret_types.size();
  // One InputArgInstantiation per _Arg node, in argument order.
  std::vector<InputArgInstantiation> inputs;
  inputs.reserve(num_instantiated_inputs);
  for (int in_id = 0; in_id < num_instantiated_inputs; ++in_id) {
    const Node* node = fbody->arg_nodes[in_id];
    const DataType& dtype = fbody->arg_types[in_id];
    inputs.emplace_back(node->name(), dtype);
  }
  // One OutputArgInstantiation per _Retval node, in return order.
  std::vector<OutputArgInstantiation> outputs;
  outputs.reserve(num_instantiated_outputs);
  for (int out_id = 0; out_id < num_instantiated_outputs; ++out_id) {
    const Node* node = fbody->ret_nodes[out_id];
    const DataType& dtype = fbody->ret_types[out_id];
    outputs.emplace_back(node->name(), dtype);
  }
  // Control outputs are sorted to make the item deterministic.
  std::vector<ControlOutput> control_outputs;
  control_outputs.reserve(func.control_ret_size());
  for (const auto& control_ret : func.control_ret()) {
    control_outputs.push_back({control_ret.first, control_ret.second});
  }
  std::sort(control_outputs.begin(), control_outputs.end());
  // Per-argument attributes, indexed by argument position.
  std::vector<const FunctionDef::ArgAttrs*> arg_attr(inputs.size(), nullptr);
  for (const auto& attr : func.arg_attr()) {
    if (attr.first >= inputs.size()) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid attribute index, got ", attr.first,
                       " but expected less than ", inputs.size()));
    }
    arg_attr.at(attr.first) = &attr.second;
  }
  *item = GrapplerFunctionItem(
      signature.name(),
      signature.description(),
      AttrSlice(&func.attr()), std::move(arg_attr),
      std::move(inputs), std::move(outputs), std::move(control_outputs),
      graph_def_version, signature.is_stateful(), std::move(function_body));
  return absl::OkStatus();
}
// Convenience overload for functions that need no instantiation attributes
// (fully-specified, non-parametrized functions).
Status MakeGrapplerFunctionItem(const FunctionDef& func,
                                const FunctionLibraryDefinition& flib,
                                const int graph_def_version,
                                GrapplerFunctionItem* item) {
  return MakeGrapplerFunctionItem(func, AttrSlice(), flib, graph_def_version,
                                  item);
}
// Replaces the function input at `input_index` with the constant node
// `input_const`, removing that input from the item.
//
// The _Arg node keeps its original name (so consumers' inputs stay valid)
// but takes the constant's op/attrs; the "index" attributes of the
// remaining _Arg nodes are shifted down accordingly.
Status ReplaceInputWithConst(const NodeDef& input_const, int input_index,
                             GrapplerFunctionItem* item) {
  if (!IsConstant(input_const)) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Input node is not a constant: ", SummarizeNodeDef(input_const)));
  }
  const int item_input_size = item->input_size();
  if (input_index < 0 || input_index >= item_input_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Function input index is out of bound: index=", input_index,
        " input_size=", item->input_size()));
  }
  const InputArgInstantiation& input_arg = item->input(input_index);
  for (NodeDef& node : *item->graph.mutable_node()) {
    // Swap the _Arg node for the constant, keeping the original node name.
    if (node.name() == input_arg.node_name) {
      node = input_const;
      node.set_name(input_arg.node_name);
      node.clear_input();
      node.clear_device();
    }
    // Shift down the index of every _Arg that followed the removed input.
    // (The replaced node is no longer an _Arg at this point.)
    if (IsArg(node)) {
      auto attrs = AttrSlice(node);
      int index;
      TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
      if (index >= input_index) {
        (*node.mutable_attr())["index"].set_i(index - 1);
      }
    }
  }
  item->input_args_.erase(item->input_args_.begin() + input_index);
  item->arg_attr_.erase(item->arg_attr_.begin() + input_index);
  return absl::OkStatus();
}
// Removes the function outputs whose indices appear in `remove_outputs`.
//
// Fills `output_mapping` (must be passed in empty) with (old_index,
// new_index) pairs for every kept output whose position changed, and
// rewrites the "index" attribute of the matching _Retval body nodes.
Status RemoveFunctionOutputs(const absl::flat_hash_set<int>& remove_outputs,
                             GrapplerFunctionItem* item,
                             std::vector<std::pair<int, int>>* output_mapping) {
  DCHECK(output_mapping->empty());
  // Validate all indices before mutating anything.
  for (int remove_output : remove_outputs) {
    const int item_output_size = item->output_size();
    if (remove_output < 0 || remove_output >= item_output_size) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Function output index is out of bound: index=", remove_output,
          " output_size=", item->output_size()));
    }
  }
  absl::flat_hash_set<const OutputArgInstantiation*> remove_output_args;
  const auto is_remove_output_arg = [&](const OutputArgInstantiation& output) {
    return remove_output_args.find(&output) != remove_output_args.end();
  };
  for (int i = 0, end = item->output_size(); i < end; ++i) {
    const OutputArgInstantiation& output = item->output(i);
    if (remove_outputs.contains(i)) {
      VLOG(3) << "Remove functions output: name=" << output.node_name
              << "(index = " << i << ")";
      remove_output_args.insert(&output);
    } else if (!remove_output_args.empty()) {
      // Kept output shifted down by the number of removed outputs before it.
      output_mapping->push_back({i, i - remove_output_args.size()});
    }
  }
  // Rewrite _Retval "index" attributes to the new output positions.
  for (NodeDef& node : *item->graph.mutable_node()) {
    if (IsRetval(node)) {
      auto attrs = AttrSlice(node);
      int index;
      TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
      for (const auto& mapping : *output_mapping) {
        const int from = mapping.first;
        const int to = mapping.second;
        if (index == from) {
          (*node.mutable_attr())["index"].set_i(to);
        }
      }
    }
  }
  auto& o = item->output_args_;
  o.erase(std::remove_if(o.begin(), o.end(), is_remove_output_arg), o.end());
  return absl::OkStatus();
}
namespace {
// Helper for converting a GrapplerFunctionItem back into a FunctionDef.
// Tracks which body nodes are function inputs/outputs and, for every body
// node, the named output ranges of its op, so graph-def tensor ids
// ("node:<pos>") can be rewritten into function-def input form
// ("node:<output_name>:<offset>").
class MakeFunctionDefHelper {
 public:
  MakeFunctionDefHelper() = default;
  // Populates the input/output node sets and per-node output ranges.
  Status Initialize(const GrapplerFunctionItem& item,
                    const FunctionLibraryDefinition& flib);
  // Rewrites a single graph-def input string into function-def form.
  Status AsFunctionDefInput(const string& graph_def_input,
                            string* func_def_input) const;
  // Rewrites all inputs of `function_body_node` into function-def form.
  Status AsFunctionDefNode(NodeDef* function_body_node) const;
  bool IsInputNode(const NodeDef& node) const {
    return input_nodes_.contains(node.name());
  }
  bool IsOutputNode(const NodeDef& node) const {
    return output_nodes_.contains(node.name());
  }
 private:
  absl::flat_hash_set<absl::string_view> input_nodes_;
  absl::flat_hash_set<absl::string_view> output_nodes_;
  // Node name -> named output ranges of that node's op.
  absl::flat_hash_map<string, tensorflow::NameRangeMap> function_body_outputs_;
};
// Records the item's input/output node names and, for every body node, the
// named output ranges of its op (used later to translate graph-def tensor
// ids into function-def input strings).
// Fix: the address-of in the LookUp call was mojibake ("®istration");
// restored to `&registration` so the file compiles.
Status MakeFunctionDefHelper::Initialize(
    const GrapplerFunctionItem& item, const FunctionLibraryDefinition& flib) {
  for (const InputArgInstantiation& input_arg : item.inputs()) {
    input_nodes_.insert(input_arg.node_name);
  }
  for (const OutputArgInstantiation& output_arg : item.outputs()) {
    output_nodes_.insert(output_arg.node_name);
  }
  for (const NodeDef& node : item.function_body().node()) {
    const OpRegistrationData* registration;
    TF_RETURN_IF_ERROR(flib.LookUp(node.op(), &registration));
    tensorflow::NameRangeMap outputs_range_map;
    TF_RETURN_IF_ERROR(tensorflow::NameRangesForNode(
        node, registration->op_def, nullptr, &outputs_range_map));
    function_body_outputs_.emplace(node.name(), std::move(outputs_range_map));
  }
  return absl::OkStatus();
}
// Translates a graph-def input string into function-def form.
//
// Control inputs ("^node") pass through unchanged. An input that reads a
// function argument becomes the bare argument name. Otherwise the producing
// node's named output ranges are searched to rewrite "node:<pos>" as
// "node:<output_name>:<offset>". Fails for unknown producers.
Status MakeFunctionDefHelper::AsFunctionDefInput(const string& graph_def_input,
                                                 string* func_def_input) const {
  if (IsControlInput(graph_def_input)) {
    *func_def_input = graph_def_input;
    return absl::OkStatus();
  }
  const SafeTensorId tensor = ParseTensorName(graph_def_input);
  DCHECK_GE(tensor.index(), 0);
  // Case 1: reads directly from a function input argument.
  const auto is_input = input_nodes_.find(tensor.node());
  if (is_input != input_nodes_.end()) {
    DCHECK_EQ(tensor.index(), 0);
    *func_def_input = tensor.node();
    return absl::OkStatus();
  }
  // Case 2: reads from a function body node; locate the named output range
  // that contains this position.
  const auto is_body_output = function_body_outputs_.find(tensor.node());
  if (is_body_output != function_body_outputs_.end()) {
    const tensorflow::NameRangeMap& outputs_range_map = is_body_output->second;
    for (const auto& el : outputs_range_map) {
      const auto& output_name = el.first;
      const auto& output_range = el.second;
      if (tensor.index() >= output_range.first &&
          tensor.index() < output_range.second) {
        *func_def_input = absl::StrCat(tensor.node(), ":", output_name, ":",
                                       tensor.index() - output_range.first);
        return absl::OkStatus();
      }
    }
  }
  return absl::InvalidArgumentError(
      absl::StrCat("Unknown graph def input: ", graph_def_input));
}
// Rewrites every input of `function_body_node` from graph-def form into
// function-def form, in place. Returns the first translation error, if any.
Status MakeFunctionDefHelper::AsFunctionDefNode(
    NodeDef* function_body_node) const {
  const int num_inputs = function_body_node->input_size();
  for (int idx = 0; idx < num_inputs; ++idx) {
    string rewritten;
    TF_RETURN_IF_ERROR(
        AsFunctionDefInput(function_body_node->input(idx), &rewritten));
    function_body_node->set_input(idx, rewritten);
  }
  return absl::OkStatus();
}
}
// Converts a GrapplerFunctionItem (e.g. after optimization) back into a
// FunctionDef.
//
// Restores the signature (inputs, outputs, control outputs), function and
// per-argument attributes, and re-encodes body-node inputs in function-def
// format. _Arg/_Retval nodes are dropped from the body; output args are
// wired through the ret map instead.
Status MakeFunctionDef(const GrapplerFunctionItem& item,
                       const FunctionLibraryDefinition& flib,
                       FunctionDef* func) {
  func->mutable_signature()->set_name(item.id);
  func->mutable_signature()->set_description(item.description());
  func->mutable_signature()->set_is_stateful(item.is_stateful());
  MakeFunctionDefHelper helper;
  TF_RETURN_IF_ERROR(helper.Initialize(item, flib));
  // Output node name -> the tensor it returns (single input of its _Retval).
  absl::flat_hash_map<absl::string_view, string> output_tensors;
  for (const NodeDef& func_body_node : item.function_body().node()) {
    if (!helper.IsOutputNode(func_body_node)) continue;
    if (func_body_node.input_size() != 1) {
      return absl::InternalError(
          absl::StrCat("_Retval node must have single input: ",
                       SummarizeNodeDef(func_body_node)));
    }
    output_tensors.emplace(func_body_node.name(), func_body_node.input(0));
  }
  for (const InputArgInstantiation& input_arg : item.inputs()) {
    OpDef::ArgDef arg_def;
    arg_def.set_name(input_arg.node_name);
    arg_def.set_type(input_arg.data_type);
    arg_def.set_is_ref(IsRefType(input_arg.data_type));
    *func->mutable_signature()->add_input_arg() = arg_def;
  }
  for (const OutputArgInstantiation& output_arg : item.outputs()) {
    // Strip the "_RetVal" suffix added at instantiation time to recover the
    // original output argument name.
    const string output_name =
        absl::StrReplaceAll(output_arg.node_name, {{"_RetVal", ""}});
    OpDef::ArgDef arg_def;
    arg_def.set_name(output_name);
    arg_def.set_type(output_arg.data_type);
    arg_def.set_is_ref(IsRefType(output_arg.data_type));
    *func->mutable_signature()->add_output_arg() = arg_def;
    auto it = output_tensors.find(output_arg.node_name);
    if (it == output_tensors.end()) {
      return absl::InternalError(
          absl::StrCat("Can't find an output tensor for the output node: ",
                       output_arg.node_name));
    }
    TF_RETURN_IF_ERROR(helper.AsFunctionDefInput(
        it->second, &(*func->mutable_ret())[output_name]));
  }
  for (const ControlOutput& control_out : item.control_outputs()) {
    func->mutable_control_ret()->insert(
        {control_out.output_name, control_out.node_name});
    *func->mutable_signature()->add_control_output() = control_out.output_name;
  }
  for (const auto& attr : item.func_attr()) {
    const auto& attr_name = attr.first;
    const auto& attr_value = attr.second;
    (*func->mutable_attr())[attr_name] = attr_value;
  }
  for (int i = 0, end = item.arg_attr().size(); i < end; ++i) {
    const auto* attr = item.arg_attr().at(i);
    if (attr != nullptr) {
      (*func->mutable_arg_attr())[i] = *attr;
    }
  }
  // Copy body nodes, skipping _Arg/_Retval (represented by the signature).
  for (const NodeDef& func_node : item.function_body().node()) {
    if (IsArg(func_node) || IsRetval(func_node) ||
        helper.IsInputNode(func_node) || helper.IsOutputNode(func_node))
      continue;
    NodeDef* func_def_node = func->add_node_def();
    *func_def_node = func_node;
    TF_RETURN_IF_ERROR(helper.AsFunctionDefNode(func_def_node));
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
// Test fixture for the Grappler function utilities (no shared state).
class FunctionsTest : public ::testing::Test {};
// HasParametrizedType/HasParametrizedBody/IsParametrized on one
// parametrized and one fully-specified function.
TEST_F(FunctionsTest, IsParametrized) {
  FunctionDef parametrized_func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  FunctionDef non_parametrized_func = FunctionDefHelper::Create(
      "MyMul", {"x:float", "y:float"}, {"z:float"}, {},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
      {{"z", "output:z:0"}});
  EXPECT_TRUE(HasParametrizedType(parametrized_func));
  EXPECT_TRUE(HasParametrizedBody(parametrized_func));
  EXPECT_TRUE(IsParametrized(parametrized_func));
  EXPECT_FALSE(HasParametrizedType(non_parametrized_func));
  EXPECT_FALSE(HasParametrizedBody(non_parametrized_func));
  EXPECT_FALSE(IsParametrized(non_parametrized_func));
}
// Resolves type parameters (a list(type) attr expands to keys "C:0", "C:1")
// and body placeholders for a parametrized function.
TEST_F(FunctionsTest, InstantiationParameters) {
  FunctionDef func = FunctionDefHelper::Create(
      "ParametrizedFunc",
      {"input1:A", "input2:B", "input3:float", "input4: C"},
      {"output1: A", "output2:D"},
      {
          "A: {float, double}",
          "B: {float, int32}",
          "C: list(type)",
          "D: {float, double}",
      },
      {{{"output"}, "FakeOp", {"input1", "input2"}, {{"key", "$key"}}}},
      {{"x", "cx:output:0"}, {"y", "cy:output:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["key"].set_s("key-value");
  func_instantiation_attr["A"].set_type(DT_FLOAT);
  func_instantiation_attr["B"].set_type(DT_INT32);
  func_instantiation_attr["C"].mutable_list()->add_type(DT_FLOAT);
  func_instantiation_attr["C"].mutable_list()->add_type(DT_INT32);
  func_instantiation_attr["D"].set_type(DT_DOUBLE);
  absl::flat_hash_map<string, DataType> type_parameters;
  TF_EXPECT_OK(InstantiationTypeParameters(
      func, AttrSlice(&func_instantiation_attr), &type_parameters));
  ASSERT_EQ(5, type_parameters.size());
  EXPECT_EQ(DT_FLOAT, type_parameters["A"]);
  EXPECT_EQ(DT_INT32, type_parameters["B"]);
  EXPECT_EQ(DT_FLOAT, type_parameters["C:0"]);
  EXPECT_EQ(DT_INT32, type_parameters["C:1"]);
  EXPECT_EQ(DT_DOUBLE, type_parameters["D"]);
  absl::flat_hash_map<string, AttrValue> body_parameters;
  TF_EXPECT_OK(InstantiationBodyParameters(
      func, AttrSlice(&func_instantiation_attr), &body_parameters));
  ASSERT_EQ(1, body_parameters.size());
  EXPECT_EQ("key-value", body_parameters["key"].s());
}
// Instantiates XTimesTwo with T=float and checks the resulting item's
// _Arg/_Retval wiring and body nodes.
TEST_F(FunctionsTest, FromSimpleFunctionDef) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "XTimesTwo",
      {"x: T"},
      {"y: T"},
      {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("XTimesTwo", item.id);
  EXPECT_EQ(5, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("y_RetVal", item.output(0).node_name);
  // `count` tracks that every expected node was visited exactly once.
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "two" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "scale" && ++count) {
      EXPECT_EQ("Cast", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("two", node.input(0));
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("scale", node.input(1));
    } else if (node.name() == "y_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ(0, node.attr().at("index").i());
    }
  }
  EXPECT_EQ(5, count);
}
// Instantiates SubGrad, whose body contains a multi-output node
// (BroadcastGradientArgs), and checks that secondary outputs are addressed
// as "node:<pos>" (e.g. "rx:1").
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
  std::vector<FunctionDefHelper::Node> nodes = {
      {{"sx"}, "Shape", {"x"}},
      {{"sy"}, "Shape", {"y"}},
      {{"gx"}, "Identity", {"dz"}},
      {{"gy"}, "Neg", {"dz"}},
      {{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
      {{"sum_gx"}, "Sum", {"gx", "rx"}},
      {{"dx"}, "Reshape", {"sum_gx", "sx"}},
      {{"sum_gy"}, "Sum", {"gy", "ry"}},
      {{"dy"}, "Reshape", {"sum_gy", "sy"}},
  };
  // All nodes except BroadcastGradientArgs are parametrized on T.
  for (auto &n : nodes) {
    if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
      n.attr = {{"T", "$T"}};
    }
  }
  FunctionDef func = FunctionDefHelper::Define(
      "SubGrad",
      {"x: T", "y: T", "dz: T"},
      {"dx: T", "dy: T"},
      {{"T: {half, float, double}"}},
      nodes);
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SubGrad", item.id);
  EXPECT_EQ(14, item.function_body().node_size());
  ASSERT_EQ(3, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ("y", item.input(1).node_name);
  EXPECT_EQ("dz", item.input(2).node_name);
  ASSERT_EQ(2, item.output_size());
  EXPECT_EQ("dx_RetVal", item.output(0).node_name);
  EXPECT_EQ("dy_RetVal", item.output(1).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "rx" && ++count) {
      EXPECT_EQ("BroadcastGradientArgs", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("sx", node.input(0));
      EXPECT_EQ("sy", node.input(1));
    } else if (node.name() == "sum_gx" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gx", node.input(0));
      EXPECT_EQ("rx", node.input(1));
    } else if (node.name() == "sum_gy" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gy", node.input(0));
      EXPECT_EQ("rx:1", node.input(1));
    } else if (node.name() == "dx_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dx", node.input(0));
    } else if (node.name() == "dy_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(1, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dy", node.input(0));
    }
  }
  EXPECT_EQ(8, count);
}
// Instantiates a function that calls another library function (Swap) and
// uses control dependencies; checks data and control edge wiring.
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
      "Swap",
      {"i0: T", "i1: T"},
      {"o0: T", "o1: T"},
      {"T: {float, double}"},
      {{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
       {{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
  FunctionDef func = FunctionDefHelper::Create(
      "ManySwapsFirst",
      {"x: float", "y: float"},
      {"o: float"},
      {},
      {{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
       {{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
       {{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
       {{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
       {{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
      {{"o", "o:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : 1;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "a0" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^x2", node.input(2));
    } else if (node.name() == "a1" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("a0", node.input(0));
      EXPECT_EQ("a0:1", node.input(1));
    } else if (node.name() == "x2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "y2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^a1", node.input(2));
    } else if (node.name() == "o" && ++count) {
      EXPECT_EQ("Add", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x2", node.input(0));
      EXPECT_EQ("y2", node.input(1));
    }
  }
  EXPECT_EQ(7, count);
}
// Checks that named output tensors ("Exp:y:0") are mapped to plain node
// names in the instantiated graph-def body.
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
  FunctionDef func = FunctionDefHelper::Create(
      "Exp_func",
      {"in: float"},
      {"out: float"},
      {},
      {{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
       {{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
      {{"out", "Exp:y:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("out_RetVal", item.output(0).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "in" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "Linear_func" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("in", node.input(0));
    } else if (node.name() == "Exp" && ++count) {
      EXPECT_EQ("Exp", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("Linear_func", node.input(0));
    } else if (node.name() == "out_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("Exp", node.input(0));
    }
  }
  EXPECT_EQ(4, count);
}
// A function with no inputs still instantiates: zero input args, one
// output, and a body of Const -> Cast -> _Retval.
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "GenerateTwo",
      {},
      {"o: T"},
      {"T: {float, double}"},
      {{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
       {{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("o_RetVal", item.output(0).node_name);
  EXPECT_EQ(3, item.function_body().node_size());
  const NodeDef &two = item.function_body().node(0);
  EXPECT_EQ("two", two.name());
  EXPECT_EQ(0, two.input_size());
  const NodeDef &cast = item.function_body().node(1);
  EXPECT_EQ("o", cast.name());
  EXPECT_EQ(1, cast.input_size());
  EXPECT_EQ("two", cast.input(0));
  const NodeDef &retval = item.function_body().node(2);
  EXPECT_EQ("o_RetVal", retval.name());
  EXPECT_EQ(1, retval.input_size());
  EXPECT_EQ("o", retval.input(0));
}
// A function whose only observable effect is a side-effectful op (AssignAdd
// on a ref input) must keep that op in the body and must disable pruning of
// stateful/dataset ops in the optimization options.
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Define(
      "SideEffects",
      {"x: Ref(float)"},
      {},
      {},
      {{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
       {{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SideEffects", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  // Pruning stateful ops would drop the AssignAdd, so it must be disallowed.
  const auto &opts = item.optimization_options();
  EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
// Control outputs declared in the FunctionDef must surface both as keep_ops
// (so the node survives pruning) and as named ControlOutput entries mapping
// the output name to the producing node.
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Create(
      "WithControlOutputs", {"x: Ref(float)"}, {}, {},
      {
          {{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
          {{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
      },
      {}, {{"side_effects", "update"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("WithControlOutputs", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  ASSERT_EQ(1, item.keep_ops.size());
  EXPECT_EQ("update", item.keep_ops[0]);
  ASSERT_EQ(1, item.control_output_size());
  const ControlOutput &ctrl = item.control_outputs()[0];
  EXPECT_EQ("side_effects", ctrl.output_name);
  EXPECT_EQ("update", ctrl.node_name);
}
// Round-trips a FunctionDef through GrapplerFunctionItem and back with
// MakeFunctionDef: the signature is specialized to the instantiation type
// (T=float), per-argument attributes are preserved, and node attrs/inputs
// are rewritten into the "node:output:index" FunctionDef format.
TEST_F(FunctionsTest, MakeFunctionDef) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "XTimesTwo",
      {"x: T"},
      {"y: T"},
      {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
  // Attach an arg_attr to input 0 to verify it survives the round trip.
  const uint32 arg_index = 0;
  const std::pair<string, string> arg_attr_key_and_value = {"_arg_attr", "abc"};
  FunctionDef::ArgAttrs arg_attr;
  (*arg_attr.mutable_attr())[arg_attr_key_and_value.first].set_s(
      arg_attr_key_and_value.second);
  (*func.mutable_arg_attr())[arg_index] = arg_attr;
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  EXPECT_EQ("x", specialized.signature().input_arg(0).name());
  EXPECT_EQ(DT_FLOAT, specialized.signature().input_arg(0).type());
  EXPECT_EQ("y", specialized.signature().output_arg(0).name());
  EXPECT_EQ(DT_FLOAT, specialized.signature().output_arg(0).type());
  EXPECT_EQ(specialized.arg_attr().size(), 1);
  EXPECT_EQ(specialized.arg_attr().at(arg_index).attr().size(), 1);
  EXPECT_EQ(specialized.arg_attr()
                .at(arg_index)
                .attr()
                .at(arg_attr_key_and_value.first)
                .s(),
            arg_attr_key_and_value.second);
  // ++count inside the condition tallies how many expected nodes were seen.
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "scale" && ++count) {
      EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("scale:y:0", node.input(1));
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
    }
  }
  EXPECT_EQ(2, count);
}
// ReplaceInputWithConst must rewrite an _Arg node into the provided Const
// node in place, shrink item.input_size() accordingly, and produce a
// specialized FunctionDef with no input args and the Consts inlined.
TEST_F(FunctionsTest, ReplaceInputWithConst) {
  FunctionDef func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(2, item.input_size());
  EXPECT_EQ(1, item.output_size());
  ASSERT_EQ(4, item.function_body().node_size());
  // References into the body: used below to observe in-place replacement.
  const NodeDef &input_x = item.function_body().node(0);
  const NodeDef &input_y = item.function_body().node(1);
  EXPECT_EQ("_Arg", input_x.op());
  EXPECT_EQ("_Arg", input_y.op());
  // "Tag" attrs distinguish which Const replaced which input.
  NodeDef const_input_x;
  const_input_x.set_op("Const");
  AddNodeAttr("Tag", "const_input_x", &const_input_x);
  NodeDef const_input_y;
  const_input_y.set_op("Const");
  AddNodeAttr("Tag", "const_input_y", &const_input_y);
  TF_EXPECT_OK(ReplaceInputWithConst(const_input_x, 0, &item));
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ("Const", input_x.op());
  EXPECT_EQ("const_input_x", input_x.attr().at("Tag").s());
  // After removing x, the remaining input y is now at index 0.
  TF_EXPECT_OK(ReplaceInputWithConst(const_input_y, 0, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ("Const", input_y.op());
  EXPECT_EQ("const_input_y", input_y.attr().at("Tag").s());
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  EXPECT_EQ(0, specialized.signature().input_arg_size());
  EXPECT_EQ(1, specialized.signature().output_arg_size());
  EXPECT_EQ(3, specialized.node_def_size());
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "x" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("const_input_x", node.attr().at("Tag").s());
    } else if (node.name() == "y" && ++count) {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("const_input_y", node.attr().at("Tag").s());
    } else if (node.name() == "output" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("x:output:0", node.input(0));
      EXPECT_EQ("y:output:0", node.input(1));
    }
  }
  EXPECT_EQ(3, count);
}
// SwapFunctionBody replaces the item's graph wholesale; MakeFunctionDef must
// then serialize the new body (an Identity pass-through here) and remap the
// "z" return value to the new producing node.
TEST_F(FunctionsTest, SwapFunctionBodyAndMakeFunctionDef) {
  using ::tensorflow::test::function::NDef;
  FunctionDef mul_func = FunctionDefHelper::Create(
      "MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  FunctionDef func = FunctionDefHelper::Create(
      "MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
      {{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
      {{"z", "output:z:0"}});
  // Replacement body: identity of x feeding the _Retval directly.
  GraphDef id_func_body = test::function::GDef(
      {
          NDef("read_x", "Identity", {"x"}, {{"T", "float"}}),
          NDef("z_RetVal", "_Retval", {"read_x"}, {{"T", "float"}})});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionDefLibrary lib_def;
  *lib_def.add_function() = func;
  *lib_def.add_function() = mul_func;
  FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  item.SwapFunctionBody(std::move(id_func_body));
  FunctionDef specialized;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
  int count = 0;
  for (const NodeDef &node : specialized.node_def()) {
    if (node.name() == "read_x" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ("x", node.input(0));
    }
  }
  EXPECT_EQ(1, count);
  // The ret mapping must point at the swapped-in node, not the old "output".
  EXPECT_EQ("read_x:output:0", (*specialized.mutable_ret())["z"]);
}
// Full round trip: FunctionDef -> GrapplerFunctionItem -> FunctionDef must
// be lossless (FunctionDefsEqual), including the signature description and
// the control-output ("must_execute") entry.
TEST_F(FunctionsTest, FunctionDefGrapplerFunctionItemRoundTrip) {
  FunctionDef func = FunctionDefHelper::Create(
      "DoNothing", {"i: int32"}, {"o: int32"},
      {},
      {
          {{"id"}, "Identity", {"i"}, {{"T", DT_INT32}}},
      },
      {{"o", "id:output:0"}},
      {{"must_execute", "id"}});
  constexpr char description[] = "This is a helpful description.";
  func.mutable_signature()->set_description(description);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_INT32);
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  FunctionDef func2;
  TF_EXPECT_OK(MakeFunctionDef(item, flib, &func2));
  EXPECT_TRUE(FunctionDefsEqual(func, func2));
}
}
}
} | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
GrapplerFunctionItem::GrapplerFunctionItem(
string func_name, string description, AttrSlice func_attr,
std::vector<const FunctionDef::ArgAttrs*> arg_attr,
std::vector<InputArgInstantiation> input_args,
std::vector<OutputArgInstantiation> output_args,
std::vector<ControlOutput> control_outputs, const int graph_def_version,
const bool is_stateful, GraphDef&& function_body)
: description_(std::move(description)),
func_attr_(func_attr),
arg_attr_(std::move(arg_attr)),
input_args_(std::move(input_args)),
output_args_(std::move(output_args)),
control_outputs_(std::move(control_outputs)),
is_stateful_(is_stateful) {
id = std::move(func_name);
graph = std::move(function_body);
graph.mutable_versions()->set_producer(graph_def_version);
for (const InputArgInstantiation& input_arg : input_args_) {
feed.push_back({input_arg.node_name, Tensor()});
}
for (const OutputArgInstantiation& output_arg : output_args_) {
fetch.push_back(output_arg.node_name);
}
for (const ControlOutput& control_output : control_outputs_) {
keep_ops.push_back(control_output.node_name);
}
optimization_options().allow_pruning_stateful_and_dataset_ops = false;
} | TEST_F(FunctionsTest, FromSimpleFunctionDef) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("XTimesTwo", item.id);
EXPECT_EQ(5, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ("x", item.input(0).node_name);
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("y_RetVal", item.output(0).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" && ++count) {
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(0, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "two" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "scale" && ++count) {
EXPECT_EQ("Cast", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("two", node.input(0));
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("scale", node.input(1));
} else if (node.name() == "y_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(0, node.attr().at("index").i());
}
}
EXPECT_EQ(5, count);
}
// Nodes with multiple outputs (BroadcastGradientArgs -> rx, ry): inputs that
// consume a non-zero output must be rewritten to the "node:index" form
// ("rx:1" for ry), while output 0 stays as the bare node name.
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
  std::vector<FunctionDefHelper::Node> nodes = {
      {{"sx"}, "Shape", {"x"}},
      {{"sy"}, "Shape", {"y"}},
      {{"gx"}, "Identity", {"dz"}},
      {{"gy"}, "Neg", {"dz"}},
      {{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
      {{"sum_gx"}, "Sum", {"gx", "rx"}},
      {{"dx"}, "Reshape", {"sum_gx", "sx"}},
      {{"sum_gy"}, "Sum", {"gy", "ry"}},
      {{"dy"}, "Reshape", {"sum_gy", "sy"}},
  };
  // Give every node except BroadcastGradientArgs the placeholder T attr.
  for (auto &n : nodes) {
    if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
      n.attr = {{"T", "$T"}};
    }
  }
  FunctionDef func = FunctionDefHelper::Define(
      "SubGrad",
      {"x: T", "y: T", "dz: T"},
      {"dx: T", "dy: T"},
      {{"T: {half, float, double}"}},
      nodes);
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SubGrad", item.id);
  EXPECT_EQ(14, item.function_body().node_size());
  ASSERT_EQ(3, item.input_size());
  EXPECT_EQ("x", item.input(0).node_name);
  EXPECT_EQ("y", item.input(1).node_name);
  EXPECT_EQ("dz", item.input(2).node_name);
  ASSERT_EQ(2, item.output_size());
  EXPECT_EQ("dx_RetVal", item.output(0).node_name);
  EXPECT_EQ("dy_RetVal", item.output(1).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "rx" && ++count) {
      EXPECT_EQ("BroadcastGradientArgs", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("sx", node.input(0));
      EXPECT_EQ("sy", node.input(1));
    } else if (node.name() == "sum_gx" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gx", node.input(0));
      EXPECT_EQ("rx", node.input(1));
    } else if (node.name() == "sum_gy" && ++count) {
      EXPECT_EQ("Sum", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("gy", node.input(0));
      // "ry" is output 1 of the "rx" node in GraphDef form.
      EXPECT_EQ("rx:1", node.input(1));
    } else if (node.name() == "dx_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dx", node.input(0));
    } else if (node.name() == "dy_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(1, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("dy", node.input(0));
    }
  }
  EXPECT_EQ(8, count);
}
// Calls to a nested function (Swap) stay as function-call nodes; control
// dependencies declared in the FunctionDef become "^node" inputs, and
// multi-output references like "a0:o1:0" are rewritten to GraphDef "a0:1".
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
      "Swap",
      {"i0: T", "i1: T"},
      {"o0: T", "o1: T"},
      {"T: {float, double}"},
      {{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
       {{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
  FunctionDef func = FunctionDefHelper::Create(
      "ManySwapsFirst",
      {"x: float", "y: float"},
      {"o: float"},
      {},
      {{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
       {{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
       {{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
       {{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
       {{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
      {{"o", "o:z:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "x" || node.name() == "y") {
      count++;
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      int expected_index = node.name() == "x" ? 0 : 1;
      EXPECT_EQ(expected_index, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "a0" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
      // Control dependency on x2 from the FunctionDef's dep list.
      EXPECT_EQ("^x2", node.input(2));
    } else if (node.name() == "a1" && ++count) {
      EXPECT_EQ("Swap", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("a0", node.input(0));
      EXPECT_EQ("a0:1", node.input(1));
    } else if (node.name() == "x2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "y2" && ++count) {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("y", node.input(1));
      EXPECT_EQ("^a1", node.input(2));
    } else if (node.name() == "o" && ++count) {
      EXPECT_EQ("Add", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x2", node.input(0));
      EXPECT_EQ("y2", node.input(1));
    }
  }
  EXPECT_EQ(7, count);
}
// An output mapped to a named op output ("out" -> "Exp:y:0") must become an
// "out_RetVal" _Retval node fed directly by the producing node.
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
  FunctionDef func = FunctionDefHelper::Create(
      "Exp_func",
      {"in: float"},
      {"out: float"},
      {},
      {{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
       {{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
      {{"out", "Exp:y:0"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("out_RetVal", item.output(0).node_name);
  int count = 0;
  for (const NodeDef &node : item.function_body().node()) {
    if (node.name() == "in" && ++count) {
      EXPECT_EQ("_Arg", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
      EXPECT_EQ(0, node.attr().at("index").i());
      EXPECT_EQ(0, node.input_size());
    } else if (node.name() == "Linear_func" && ++count) {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("in", node.input(0));
    } else if (node.name() == "Exp" && ++count) {
      EXPECT_EQ("Exp", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("Linear_func", node.input(0));
    } else if (node.name() == "out_RetVal" && ++count) {
      EXPECT_EQ("_Retval", node.op());
      EXPECT_EQ(0, node.attr().at("index").i());
      ASSERT_EQ(1, node.input_size());
      EXPECT_EQ("Exp", node.input(0));
    }
  }
  EXPECT_EQ(4, count);
}
// Instantiating a function that takes no inputs: the item must have zero
// inputs, a single output ("o_RetVal"), and exactly three body nodes in
// instantiation order: Const -> Cast -> _Retval.
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  FunctionDef func = FunctionDefHelper::Define(
      "GenerateTwo",
      {},
      {"o: T"},
      {"T: {float, double}"},
      {{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
       {{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  func_instantiation_attr["T"].set_type(DT_FLOAT);
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ(0, item.input_size());
  EXPECT_EQ(1, item.output_size());
  EXPECT_EQ("o_RetVal", item.output(0).node_name);
  EXPECT_EQ(3, item.function_body().node_size());
  // Body nodes are checked positionally, so this also pins the node order.
  const NodeDef &two = item.function_body().node(0);
  EXPECT_EQ("two", two.name());
  EXPECT_EQ(0, two.input_size());
  const NodeDef &cast = item.function_body().node(1);
  EXPECT_EQ("o", cast.name());
  EXPECT_EQ(1, cast.input_size());
  EXPECT_EQ("two", cast.input(0));
  const NodeDef &retval = item.function_body().node(2);
  EXPECT_EQ("o_RetVal", retval.name());
  EXPECT_EQ(1, retval.input_size());
  EXPECT_EQ("o", retval.input(0));
}
// A function whose only observable effect is a side-effectful op (AssignAdd
// on a ref input) must keep that op in the body and must disable pruning of
// stateful/dataset ops in the optimization options.
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Define(
      "SideEffects",
      {"x: Ref(float)"},
      {},
      {},
      {{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
       {{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("SideEffects", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  // Pruning stateful ops would drop the AssignAdd, so it must be disallowed.
  const auto &opts = item.optimization_options();
  EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
// Control outputs declared in the FunctionDef must surface both as keep_ops
// (so the node survives pruning) and as named ControlOutput entries mapping
// the output name to the producing node.
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
  const Tensor kOne = test::AsScalar<float>(1.0);
  FunctionDef func = FunctionDefHelper::Create(
      "WithControlOutputs", {"x: Ref(float)"}, {}, {},
      {
          {{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
          {{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
      },
      {}, {{"side_effects", "update"}});
  protobuf::Map<string, AttrValue> func_instantiation_attr;
  FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
  GrapplerFunctionItem item;
  TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
                                        AttrSlice(&func_instantiation_attr),
                                        flib, TF_GRAPH_DEF_VERSION, &item));
  EXPECT_EQ("WithControlOutputs", item.id);
  EXPECT_EQ(3, item.function_body().node_size());
  EXPECT_EQ(1, item.input_size());
  EXPECT_EQ(0, item.output_size());
  ASSERT_EQ(1, item.keep_ops.size());
  EXPECT_EQ("update", item.keep_ops[0]);
  ASSERT_EQ(1, item.control_output_size());
  const ControlOutput &ctrl = item.control_outputs()[0];
  EXPECT_EQ("side_effects", ctrl.output_name);
  EXPECT_EQ("update", ctrl.node_name);
}
#include "quiche/quic/core/quic_packets.h"
#include <algorithm>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
// Returns the server-chosen connection ID from the point of view of the
// packet's recipient: the destination CID when we are the server, otherwise
// the source CID.
QuicConnectionId GetServerConnectionIdAsRecipient(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_SERVER
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Returns the client-chosen connection ID from the point of view of the
// packet's recipient: the destination CID when we are the client, otherwise
// the source CID.
QuicConnectionId GetClientConnectionIdAsRecipient(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Returns the server-chosen connection ID from the point of view of the
// packet's sender: clients address the server via the destination CID,
// servers identify themselves via the source CID.
QuicConnectionId GetServerConnectionIdAsSender(const QuicPacketHeader& header,
                                               Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Same sender-side selection as GetServerConnectionIdAsSender, but for the
// present/absent flag of that connection ID.
QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id_included
             : header.source_connection_id_included;
}
// Returns the client-chosen connection ID from the point of view of the
// packet's sender: clients identify themselves via the source CID, servers
// address the client via the destination CID.
QuicConnectionId GetClientConnectionIdAsSender(const QuicPacketHeader& header,
                                               Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.source_connection_id
             : header.destination_connection_id;
}
// Same sender-side selection as GetClientConnectionIdAsSender, but for the
// present/absent flag of that connection ID.
QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.source_connection_id_included
             : header.destination_connection_id_included;
}
// Returns the number of bytes the connection ID contributes to the wire
// format: its length when present, zero when absent.
uint8_t GetIncludedConnectionIdLength(
    QuicConnectionId connection_id,
    QuicConnectionIdIncluded connection_id_included) {
  QUICHE_DCHECK(connection_id_included == CONNECTION_ID_PRESENT ||
                connection_id_included == CONNECTION_ID_ABSENT);
  if (connection_id_included != CONNECTION_ID_PRESENT) {
    return 0;
  }
  return connection_id.length();
}
// Wire length of the header's destination connection ID (0 when absent).
uint8_t GetIncludedDestinationConnectionIdLength(
    const QuicPacketHeader& header) {
  return GetIncludedConnectionIdLength(header.destination_connection_id,
                                       header.destination_connection_id_included);
}
// Wire length of the header's source connection ID (0 when absent).
uint8_t GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header) {
  return GetIncludedConnectionIdLength(
      header.source_connection_id, header.source_connection_id_included);
}
// Convenience overload: extracts all header geometry (connection-ID lengths,
// version/nonce presence, packet-number and variable-length-integer sizes)
// from `header` and forwards to the explicit-parameter overload below.
size_t GetPacketHeaderSize(QuicTransportVersion version,
                           const QuicPacketHeader& header) {
  return GetPacketHeaderSize(
      version, GetIncludedDestinationConnectionIdLength(header),
      GetIncludedSourceConnectionIdLength(header), header.version_flag,
      header.nonce != nullptr, header.packet_number_length,
      header.retry_token_length_length, header.retry_token.length(),
      header.length_length);
}
// Computes the serialized size in bytes of a packet header with the given
// geometry. Headers without a version field use the compact layout (type
// byte + destination connection ID + packet number); headers carrying a
// version use the long layout with both connection IDs and, depending on
// the transport version, length-prefixed connection IDs and the retry
// token / length fields.
size_t GetPacketHeaderSize(
    QuicTransportVersion version, uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool include_version,
    bool include_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length) {
  if (!include_version) {
    // Short header: type byte, destination connection ID, packet number.
    return kPacketHeaderTypeSize + destination_connection_id_length +
           packet_number_length;
  }
  // Long header: both connection IDs plus the version field.
  size_t size = kPacketHeaderTypeSize + kConnectionIdLengthSize +
                destination_connection_id_length + source_connection_id_length +
                packet_number_length + kQuicVersionSize;
  if (include_diversification_nonce) {
    size += kDiversificationNonceSize;
  }
  if (VersionHasLengthPrefixedConnectionIds(version)) {
    // One extra length byte: both CID lengths are carried explicitly.
    size += kConnectionIdLengthSize;
  }
  // Retry token / length fields only exist on versions with long header
  // lengths; for other versions they must all be zero.
  QUICHE_DCHECK(
      QuicVersionHasLongHeaderLengths(version) ||
      retry_token_length_length + retry_token_length + length_length == 0);
  if (QuicVersionHasLongHeaderLengths(version)) {
    size += retry_token_length_length + retry_token_length + length_length;
  }
  return size;
}
// The encrypted payload begins immediately after the header, so its offset
// equals the header size.
size_t GetStartOfEncryptedData(QuicTransportVersion version,
                               const QuicPacketHeader& header) {
  return GetPacketHeaderSize(version, header);
}
// Explicit-geometry overload; identical to GetPacketHeaderSize since the
// encrypted payload starts right after the header.
size_t GetStartOfEncryptedData(
    QuicTransportVersion version, uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool include_version,
    bool include_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length) {
  return GetPacketHeaderSize(
      version, destination_connection_id_length, source_connection_id_length,
      include_version, include_diversification_nonce, packet_number_length,
      retry_token_length_length, retry_token_length, length_length);
}
// Default header: empty CIDs with destination present / source absent,
// no flags set, 4-byte packet number, unsupported version, no nonce,
// Google QUIC form, and zero-length variable-length-integer fields.
QuicPacketHeader::QuicPacketHeader()
    : destination_connection_id(EmptyQuicConnectionId()),
      destination_connection_id_included(CONNECTION_ID_PRESENT),
      source_connection_id(EmptyQuicConnectionId()),
      source_connection_id_included(CONNECTION_ID_ABSENT),
      reset_flag(false),
      version_flag(false),
      has_possible_stateless_reset_token(false),
      packet_number_length(PACKET_4BYTE_PACKET_NUMBER),
      type_byte(0),
      version(UnsupportedQuicVersion()),
      nonce(nullptr),
      form(GOOGLE_QUIC_PACKET),
      long_packet_type(INITIAL),
      possible_stateless_reset_token({}),
      retry_token_length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
      retry_token(absl::string_view()),
      length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
      remaining_packet_length(0) {}
// Copy construction/assignment and destruction have no custom logic; all
// three are defaulted out-of-line. The destructor previously used an empty
// user-provided body ("{}"), which was inconsistent with the adjacent
// "= default" copy operations — default it explicitly.
QuicPacketHeader::QuicPacketHeader(const QuicPacketHeader& other) = default;
QuicPacketHeader::~QuicPacketHeader() = default;
QuicPacketHeader& QuicPacketHeader::operator=(const QuicPacketHeader& other) =
    default;
// Public reset packets start with a zero nonce proof; the connection ID is
// either empty (default) or supplied by the caller.
QuicPublicResetPacket::QuicPublicResetPacket()
    : connection_id(EmptyQuicConnectionId()), nonce_proof(0) {}
QuicPublicResetPacket::QuicPublicResetPacket(QuicConnectionId connection_id)
    : connection_id(connection_id), nonce_proof(0) {}
// Version negotiation packet: defaults to an empty connection ID; copy
// construction is memberwise and the destructor has no custom logic.
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket()
    : connection_id(EmptyQuicConnectionId()) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
    QuicConnectionId connection_id)
    : connection_id(connection_id) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
    const QuicVersionNegotiationPacket& other) = default;
QuicVersionNegotiationPacket::~QuicVersionNegotiationPacket() {}
// IETF stateless reset packet: a (header, reset token) pair; the default
// constructor zero-initializes the token.
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket()
    : stateless_reset_token({}) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
    const QuicPacketHeader& header, StatelessResetToken token)
    : header(header), stateless_reset_token(token) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
    const QuicIetfStatelessResetPacket& other) = default;
QuicIetfStatelessResetPacket::~QuicIetfStatelessResetPacket() {}
// Human-readable dump of a packet header for logging/debugging. Always
// prints both connection IDs (with present/absent markers), the packet
// number length and the reset/version flags; version-dependent and optional
// fields (long packet type, retry token, length fields, nonce) are printed
// only when set to a non-default value.
std::ostream& operator<<(std::ostream& os, const QuicPacketHeader& header) {
  os << "{ destination_connection_id: " << header.destination_connection_id
     << " ("
     << (header.destination_connection_id_included == CONNECTION_ID_PRESENT
             ? "present"
             : "absent")
     << "), source_connection_id: " << header.source_connection_id << " ("
     << (header.source_connection_id_included == CONNECTION_ID_PRESENT
             ? "present"
             : "absent")
     << "), packet_number_length: "
     << static_cast<int>(header.packet_number_length)
     << ", reset_flag: " << header.reset_flag
     << ", version_flag: " << header.version_flag;
  if (header.version_flag) {
    os << ", version: " << ParsedQuicVersionToString(header.version);
    if (header.long_packet_type != INVALID_PACKET_TYPE) {
      os << ", long_packet_type: "
         << QuicUtils::QuicLongHeaderTypetoString(header.long_packet_type);
    }
    if (header.retry_token_length_length !=
        quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
      os << ", retry_token_length_length: "
         << static_cast<int>(header.retry_token_length_length);
    }
    if (header.retry_token.length() != 0) {
      os << ", retry_token_length: " << header.retry_token.length();
    }
    if (header.length_length != quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
      os << ", length_length: " << static_cast<int>(header.length_length);
    }
    if (header.remaining_packet_length != 0) {
      os << ", remaining_packet_length: " << header.remaining_packet_length;
    }
  }
  if (header.nonce != nullptr) {
    // Nonce is raw bytes; hex-encode for readability.
    os << ", diversification_nonce: "
       << absl::BytesToHexString(
              absl::string_view(header.nonce->data(), header.nonce->size()));
  }
  os << ", packet_number: " << header.packet_number << " }\n";
  return os;
}
// QuicData wraps a (buffer, length) pair. Ownership is explicit: only the
// three-argument constructor can take ownership, in which case the
// destructor releases the buffer with delete[]. The string_view constructor
// always borrows.
QuicData::QuicData(const char* buffer, size_t length)
    : buffer_(buffer), length_(length), owns_buffer_(false) {}
QuicData::QuicData(const char* buffer, size_t length, bool owns_buffer)
    : buffer_(buffer), length_(length), owns_buffer_(owns_buffer) {}
QuicData::QuicData(absl::string_view packet_data)
    : buffer_(packet_data.data()),
      length_(packet_data.length()),
      owns_buffer_(false) {}
QuicData::~QuicData() {
  if (owns_buffer_) {
    // Buffers handed to QuicData with owns_buffer=true must have been
    // allocated with new[].
    delete[] const_cast<char*>(buffer_);
  }
}
// QuicPacket stores a mutable buffer together with the header geometry
// (connection-ID lengths, version/nonce presence, packet-number and
// variable-length-integer sizes) that was used to serialize it.
QuicPacket::QuicPacket(
    char* buffer, size_t length, bool owns_buffer,
    uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool includes_version,
    bool includes_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length)
    : QuicData(buffer, length, owns_buffer),
      buffer_(buffer),
      destination_connection_id_length_(destination_connection_id_length),
      source_connection_id_length_(source_connection_id_length),
      includes_version_(includes_version),
      includes_diversification_nonce_(includes_diversification_nonce),
      packet_number_length_(packet_number_length),
      retry_token_length_length_(retry_token_length_length),
      retry_token_length_(retry_token_length),
      length_length_(length_length) {}
// Convenience constructor: derives the geometry from `header`. The unnamed
// QuicTransportVersion parameter is intentionally unused.
QuicPacket::QuicPacket(QuicTransportVersion , char* buffer,
                       size_t length, bool owns_buffer,
                       const QuicPacketHeader& header)
    : QuicPacket(buffer, length, owns_buffer,
                 GetIncludedDestinationConnectionIdLength(header),
                 GetIncludedSourceConnectionIdLength(header),
                 header.version_flag, header.nonce != nullptr,
                 header.packet_number_length, header.retry_token_length_length,
                 header.retry_token.length(), header.length_length) {}
// Non-owning view of an encrypted packet buffer.
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length)
    : QuicData(buffer, length) {}
// Takes ownership of |buffer| iff |owns_buffer| (ownership handled by
// QuicData's destructor).
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length,
                                         bool owns_buffer)
    : QuicData(buffer, length, owns_buffer) {}
// Non-owning view over the bytes of |data|.
QuicEncryptedPacket::QuicEncryptedPacket(absl::string_view data)
    : QuicData(data) {}
std::unique_ptr<QuicEncryptedPacket> QuicEncryptedPacket::Clone() const {
char* buffer = new char[this->length()];
std::copy(this->data(), this->data() + this->length(), buffer);
return std::make_unique<QuicEncryptedPacket>(buffer, this->length(), true);
}
// Streams only the length; the payload is ciphertext and not human-readable.
std::ostream& operator<<(std::ostream& os, const QuicEncryptedPacket& s) {
  os << s.length() << "-byte data";
  return os;
}
// The shorter constructors delegate to the fully specified one below,
// supplying defaults for the omitted fields.
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time)
    : QuicReceivedPacket(buffer, length, receipt_time,
                         /*owns_buffer=*/false) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time, bool owns_buffer)
    : QuicReceivedPacket(buffer, length, receipt_time, owns_buffer,
                         /*ttl=*/0,
                         /*ttl_valid=*/true) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time, bool owns_buffer,
                                       int ttl, bool ttl_valid)
    : quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
                               ttl_valid, /*packet_headers=*/nullptr,
                               /*headers_length=*/0,
                               /*owns_header_buffer=*/false, ECN_NOT_ECT) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time, bool owns_buffer,
                                       int ttl, bool ttl_valid,
                                       char* packet_headers,
                                       size_t headers_length,
                                       bool owns_header_buffer)
    : quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
                               ttl_valid, packet_headers, headers_length,
                               owns_header_buffer, ECN_NOT_ECT) {}
// Fully specified constructor. A TTL marked invalid is normalized to -1 so
// that ttl_ >= 0 can later be used as the validity test (see Clone()).
QuicReceivedPacket::QuicReceivedPacket(
    const char* buffer, size_t length, QuicTime receipt_time, bool owns_buffer,
    int ttl, bool ttl_valid, char* packet_headers, size_t headers_length,
    bool owns_header_buffer, QuicEcnCodepoint ecn_codepoint)
    : QuicEncryptedPacket(buffer, length, owns_buffer),
      receipt_time_(receipt_time),
      ttl_(ttl_valid ? ttl : -1),
      packet_headers_(packet_headers),
      headers_length_(headers_length),
      owns_header_buffer_(owns_header_buffer),
      ecn_codepoint_(ecn_codepoint) {}
QuicReceivedPacket::~QuicReceivedPacket() {
  // The payload buffer is handled by the QuicData base; only the optional
  // header buffer is released here, and only when owned.
  if (owns_header_buffer_) {
    delete[] static_cast<char*>(packet_headers_);
  }
}
std::unique_ptr<QuicReceivedPacket> QuicReceivedPacket::Clone() const {
char* buffer = new char[this->length()];
memcpy(buffer, this->data(), this->length());
if (this->packet_headers()) {
char* headers_buffer = new char[this->headers_length()];
memcpy(headers_buffer, this->packet_headers(), this->headers_length());
return std::make_unique<QuicReceivedPacket>(
buffer, this->length(), receipt_time(), true, ttl(), ttl() >= 0,
headers_buffer, this->headers_length(), true, this->ecn_codepoint());
}
return std::make_unique<QuicReceivedPacket>(
buffer, this->length(), receipt_time(), true, ttl(), ttl() >= 0, nullptr,
0, false, this->ecn_codepoint());
}
// Streams only the length, mirroring the QuicEncryptedPacket printer.
std::ostream& operator<<(std::ostream& os, const QuicReceivedPacket& s) {
  os << s.length() << "-byte data";
  return os;
}
// Returns the unencrypted header bytes -- everything before the encrypted
// payload -- computed from the geometry recorded at construction. Per the
// name, these bytes serve as the AEAD associated data.
absl::string_view QuicPacket::AssociatedData(
    QuicTransportVersion version) const {
  return absl::string_view(
      data(),
      GetStartOfEncryptedData(version, destination_connection_id_length_,
                              source_connection_id_length_, includes_version_,
                              includes_diversification_nonce_,
                              packet_number_length_, retry_token_length_length_,
                              retry_token_length_, length_length_));
}
// Returns the bytes following the header, i.e. the region that is encrypted
// in place (the "plaintext" before encryption).
absl::string_view QuicPacket::Plaintext(QuicTransportVersion version) const {
  const size_t start_of_encrypted_data = GetStartOfEncryptedData(
      version, destination_connection_id_length_, source_connection_id_length_,
      includes_version_, includes_diversification_nonce_, packet_number_length_,
      retry_token_length_length_, retry_token_length_, length_length_);
  return absl::string_view(data() + start_of_encrypted_data,
                           length() - start_of_encrypted_data);
}
// Constructs a serialized packet referencing (not owning) |encrypted_buffer|;
// ownership is only assumed once release_encrypted_buffer is installed (see
// the destructor). The remaining flags start at their common defaults.
SerializedPacket::SerializedPacket(QuicPacketNumber packet_number,
                                   QuicPacketNumberLength packet_number_length,
                                   const char* encrypted_buffer,
                                   QuicPacketLength encrypted_length,
                                   bool has_ack, bool has_stop_waiting)
    : encrypted_buffer(encrypted_buffer),
      encrypted_length(encrypted_length),
      has_crypto_handshake(NOT_HANDSHAKE),
      packet_number(packet_number),
      packet_number_length(packet_number_length),
      encryption_level(ENCRYPTION_INITIAL),
      has_ack(has_ack),
      has_stop_waiting(has_stop_waiting),
      transmission_type(NOT_RETRANSMISSION),
      has_ack_frame_copy(false),
      has_ack_frequency(false),
      has_message(false),
      fate(SEND_TO_WRITER) {}
// Move constructor: copies the scalar bookkeeping fields in the initializer
// list, then steals the encrypted buffer, its deleter, and the frame lists
// from |other|.
SerializedPacket::SerializedPacket(SerializedPacket&& other)
    : has_crypto_handshake(other.has_crypto_handshake),
      packet_number(other.packet_number),
      packet_number_length(other.packet_number_length),
      encryption_level(other.encryption_level),
      has_ack(other.has_ack),
      has_stop_waiting(other.has_stop_waiting),
      has_ack_ecn(other.has_ack_ecn),
      transmission_type(other.transmission_type),
      largest_acked(other.largest_acked),
      has_ack_frame_copy(other.has_ack_frame_copy),
      has_ack_frequency(other.has_ack_frequency),
      has_message(other.has_message),
      fate(other.fate),
      peer_address(other.peer_address),
      bytes_not_retransmitted(other.bytes_not_retransmitted),
      initial_header(other.initial_header) {
  // NOTE(review): in a constructor |this| can never alias |other|, so this
  // guard is always taken; kept byte-identical for fidelity.
  if (this != &other) {
    // Release any buffer presumably installed by in-class initializers
    // before adopting |other|'s buffer -- TODO confirm against the header.
    if (release_encrypted_buffer && encrypted_buffer != nullptr) {
      release_encrypted_buffer(encrypted_buffer);
    }
    encrypted_buffer = other.encrypted_buffer;
    encrypted_length = other.encrypted_length;
    // Take over the deleter and clear it in |other| so only one side frees.
    release_encrypted_buffer = std::move(other.release_encrypted_buffer);
    other.release_encrypted_buffer = nullptr;
    retransmittable_frames.swap(other.retransmittable_frames);
    nonretransmittable_frames.swap(other.nonretransmittable_frames);
  }
}
SerializedPacket::~SerializedPacket() {
  // Free the encrypted buffer only if a deleter was installed.
  if (release_encrypted_buffer && encrypted_buffer != nullptr) {
    release_encrypted_buffer(encrypted_buffer);
  }
  if (!retransmittable_frames.empty()) {
    DeleteFrames(&retransmittable_frames);
  }
  for (auto& frame : nonretransmittable_frames) {
    // Skip ACK frames unless this packet holds its own copy
    // (has_ack_frame_copy) -- otherwise the frame presumably points at data
    // owned elsewhere; deleting it here would double-free.
    if (!has_ack_frame_copy && frame.type == ACK_FRAME) {
      continue;
    }
    DeleteFrame(&frame);
  }
}
// Deep-copies |serialized|. When |copy_buffer| is true the encrypted bytes
// are duplicated and the copy owns them; otherwise the copy aliases the
// original buffer. Frames are always deep-copied via |allocator|, and ACK
// frames in the nonretransmittable list mark has_ack_frame_copy so the
// destructor knows to free them.
SerializedPacket* CopySerializedPacket(const SerializedPacket& serialized,
                                       quiche::QuicheBufferAllocator* allocator,
                                       bool copy_buffer) {
  SerializedPacket* copy = new SerializedPacket(
      serialized.packet_number, serialized.packet_number_length,
      serialized.encrypted_buffer, serialized.encrypted_length,
      serialized.has_ack, serialized.has_stop_waiting);
  copy->has_crypto_handshake = serialized.has_crypto_handshake;
  copy->encryption_level = serialized.encryption_level;
  copy->transmission_type = serialized.transmission_type;
  copy->largest_acked = serialized.largest_acked;
  copy->has_ack_frequency = serialized.has_ack_frequency;
  copy->has_message = serialized.has_message;
  copy->fate = serialized.fate;
  copy->peer_address = serialized.peer_address;
  copy->bytes_not_retransmitted = serialized.bytes_not_retransmitted;
  copy->initial_header = serialized.initial_header;
  copy->has_ack_ecn = serialized.has_ack_ecn;
  if (copy_buffer) {
    copy->encrypted_buffer = CopyBuffer(serialized);
    // Install a deleter so the copy's destructor frees the duplicate.
    copy->release_encrypted_buffer = [](const char* p) { delete[] p; };
  }
  copy->retransmittable_frames =
      CopyQuicFrames(allocator, serialized.retransmittable_frames);
  QUICHE_DCHECK(copy->nonretransmittable_frames.empty());
  for (const auto& frame : serialized.nonretransmittable_frames) {
    if (frame.type == ACK_FRAME) {
      copy->has_ack_frame_copy = true;
    }
    copy->nonretransmittable_frames.push_back(CopyQuicFrame(allocator, frame));
  }
  return copy;
}
// Duplicates |packet|'s encrypted bytes into a fresh heap buffer.
char* CopyBuffer(const SerializedPacket& packet) {
  return CopyBuffer(packet.encrypted_buffer, packet.encrypted_length);
}
// Returns a newly allocated copy of the first |encrypted_length| bytes of
// |encrypted_buffer|; the caller owns (and must delete[]) the result.
char* CopyBuffer(const char* encrypted_buffer,
                 QuicPacketLength encrypted_length) {
  char* duplicate = new char[encrypted_length];
  std::copy(encrypted_buffer, encrypted_buffer + encrypted_length, duplicate);
  return duplicate;
}
// Captures the endpoint addresses and a reference to the received packet;
// every parsed-header field starts in its "unknown/absent" default.
ReceivedPacketInfo::ReceivedPacketInfo(const QuicSocketAddress& self_address,
                                       const QuicSocketAddress& peer_address,
                                       const QuicReceivedPacket& packet)
    : self_address(self_address),
      peer_address(peer_address),
      packet(packet),
      form(GOOGLE_QUIC_PACKET),
      long_packet_type(INVALID_PACKET_TYPE),
      version_flag(false),
      use_length_prefix(false),
      version_label(0),
      version(ParsedQuicVersion::Unsupported()),
      destination_connection_id(EmptyQuicConnectionId()),
      source_connection_id(EmptyQuicConnectionId()) {}
ReceivedPacketInfo::~ReceivedPacketInfo() {}
// Renders the packet info for logging; the version is included only when the
// version flag was present.
std::string ReceivedPacketInfo::ToString() const {
  std::string result = absl::StrCat(
      "{ self_address: ", self_address.ToString(),
      ", peer_address: ", peer_address.ToString(),
      ", packet_length: ", packet.length(), ", header_format: ", form,
      ", version_flag: ", version_flag);
  if (version_flag) {
    absl::StrAppend(&result,
                    ", version: ", ParsedQuicVersionToString(version));
  }
  absl::StrAppend(&result, ", destination_connection_id: ",
                  destination_connection_id.ToString(),
                  ", source_connection_id: ", source_connection_id.ToString(),
                  " }\n");
  return result;
}
// Stream insertion delegates to ToString().
std::ostream& operator<<(std::ostream& os,
                         const ReceivedPacketInfo& packet_info) {
  os << packet_info.ToString();
  return os;
}
// Field-by-field equality. Packet numbers compare equal when both are
// uninitialized, or when both are initialized and hold the same value; one
// initialized and one not is unequal.
bool QuicPacketHeader::operator==(const QuicPacketHeader& other) const {
  return destination_connection_id == other.destination_connection_id &&
         destination_connection_id_included ==
             other.destination_connection_id_included &&
         source_connection_id == other.source_connection_id &&
         source_connection_id_included == other.source_connection_id_included &&
         reset_flag == other.reset_flag && version_flag == other.version_flag &&
         has_possible_stateless_reset_token ==
             other.has_possible_stateless_reset_token &&
         packet_number_length == other.packet_number_length &&
         type_byte == other.type_byte && version == other.version &&
         nonce == other.nonce &&
         ((!packet_number.IsInitialized() &&
           !other.packet_number.IsInitialized()) ||
          (packet_number.IsInitialized() &&
           other.packet_number.IsInitialized() &&
           packet_number == other.packet_number)) &&
         form == other.form && long_packet_type == other.long_packet_type &&
         possible_stateless_reset_token ==
             other.possible_stateless_reset_token &&
         retry_token_length_length == other.retry_token_length_length &&
         retry_token == other.retry_token &&
         length_length == other.length_length &&
         remaining_packet_length == other.remaining_packet_length;
}
// Inequality is defined as the logical negation of operator==.
bool QuicPacketHeader::operator!=(const QuicPacketHeader& other) const {
  return !(*this == other);
}
} | #include "quiche/quic/core/quic_packets.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
// Builds a header whose destination and source connection IDs (and their
// included-flags) differ, so each accessor test can tell which one it got.
QuicPacketHeader CreateFakePacketHeader() {
  QuicPacketHeader header;
  header.destination_connection_id = TestConnectionId(1);
  header.destination_connection_id_included = CONNECTION_ID_PRESENT;
  header.source_connection_id = TestConnectionId(2);
  header.source_connection_id_included = CONNECTION_ID_ABSENT;
  return header;
}
// Fixture exists only to pull in QuicTest's environment setup.
class QuicPacketsTest : public QuicTest {};
// The server's ID is the destination from the server's receiving point of
// view and the source from the client's.
TEST_F(QuicPacketsTest, GetServerConnectionIdAsRecipient) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(1),
            GetServerConnectionIdAsRecipient(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(2),
            GetServerConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
// Sender view mirrors the recipient view with the IDs swapped.
TEST_F(QuicPacketsTest, GetServerConnectionIdAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(2),
            GetServerConnectionIdAsSender(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(1),
            GetServerConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
// Included-flags follow the same perspective-dependent selection.
TEST_F(QuicPacketsTest, GetServerConnectionIdIncludedAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(CONNECTION_ID_ABSENT, GetServerConnectionIdIncludedAsSender(
                                      header, Perspective::IS_SERVER));
  EXPECT_EQ(CONNECTION_ID_PRESENT, GetServerConnectionIdIncludedAsSender(
                                       header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdIncludedAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(CONNECTION_ID_PRESENT, GetClientConnectionIdIncludedAsSender(
                                       header, Perspective::IS_SERVER));
  EXPECT_EQ(CONNECTION_ID_ABSENT, GetClientConnectionIdIncludedAsSender(
                                      header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsRecipient) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(2),
            GetClientConnectionIdAsRecipient(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(1),
            GetClientConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(1),
            GetClientConnectionIdAsSender(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(2),
            GetClientConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
// Copy-constructed headers compare equal to the original.
TEST_F(QuicPacketsTest, CopyQuicPacketHeader) {
  QuicPacketHeader header;
  QuicPacketHeader header2 = CreateFakePacketHeader();
  EXPECT_NE(header, header2);
  QuicPacketHeader header3(header2);
  EXPECT_EQ(header2, header3);
}
// CopySerializedPacket with copy_buffer=true deep-copies both frames and the
// encrypted bytes; with copy_buffer=false the buffer is aliased.
TEST_F(QuicPacketsTest, CopySerializedPacket) {
  std::string buffer(1000, 'a');
  quiche::SimpleBufferAllocator allocator;
  SerializedPacket packet(QuicPacketNumber(1), PACKET_1BYTE_PACKET_NUMBER,
                          buffer.data(), buffer.length(), false,
                          false);
  packet.retransmittable_frames.push_back(QuicFrame(QuicWindowUpdateFrame()));
  packet.retransmittable_frames.push_back(QuicFrame(QuicStreamFrame()));
  QuicAckFrame ack_frame(InitAckFrame(1));
  packet.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
  packet.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  std::unique_ptr<SerializedPacket> copy = absl::WrapUnique<SerializedPacket>(
      CopySerializedPacket(packet, &allocator, true));
  EXPECT_EQ(quic::QuicPacketNumber(1), copy->packet_number);
  EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER, copy->packet_number_length);
  ASSERT_EQ(2u, copy->retransmittable_frames.size());
  EXPECT_EQ(WINDOW_UPDATE_FRAME, copy->retransmittable_frames[0].type);
  EXPECT_EQ(STREAM_FRAME, copy->retransmittable_frames[1].type);
  ASSERT_EQ(2u, copy->nonretransmittable_frames.size());
  EXPECT_EQ(ACK_FRAME, copy->nonretransmittable_frames[0].type);
  EXPECT_EQ(PADDING_FRAME, copy->nonretransmittable_frames[1].type);
  EXPECT_EQ(1000u, copy->encrypted_length);
  quiche::test::CompareCharArraysWithHexError(
      "encrypted_buffer", copy->encrypted_buffer, copy->encrypted_length,
      packet.encrypted_buffer, packet.encrypted_length);
  std::unique_ptr<SerializedPacket> copy2 = absl::WrapUnique<SerializedPacket>(
      CopySerializedPacket(packet, &allocator, false));
  EXPECT_EQ(packet.encrypted_buffer, copy2->encrypted_buffer);
  EXPECT_EQ(1000u, copy2->encrypted_length);
}
// Clone() must preserve the ECN codepoint alongside the copied buffers.
TEST_F(QuicPacketsTest, CloneReceivedPacket) {
  char header[4] = "bar";
  QuicReceivedPacket packet("foo", 3, QuicTime::Zero(), false, 0, true, header,
                            sizeof(header) - 1, false,
                            QuicEcnCodepoint::ECN_ECT1);
  std::unique_ptr<QuicReceivedPacket> copy = packet.Clone();
  EXPECT_EQ(packet.ecn_codepoint(), copy->ecn_codepoint());
}
}
}
} | QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id_included;
}
return header.source_connection_id_included;
} | TEST_F(QuicPacketsTest, GetServerConnectionIdIncludedAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(CONNECTION_ID_ABSENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_SERVER));
EXPECT_EQ(CONNECTION_ID_PRESENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_CLIENT));
} |
#ifndef ABSL_STRINGS_CHARSET_H_
#define ABSL_STRINGS_CHARSET_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/strings/string_view.h"
namespace absl {
// A constexpr set of 8-bit characters stored as a 256-bit bitmap (four
// uint64_t words). Supports compile-time construction, set algebra
// (&, |, ~), and O(1) membership tests.
class CharSet {
 public:
  // The empty set.
  constexpr CharSet() : m_() {}
  // The set of every character appearing in |str|. Because string_view is
  // length-delimited, embedded NUL bytes are included.
  constexpr explicit CharSet(absl::string_view str) : m_() {
    for (char c : str) {
      SetChar(static_cast<unsigned char>(c));
    }
  }
  // True iff |c| is a member; |c| is reinterpreted as unsigned (0..255).
  constexpr bool contains(char c) const {
    return ((m_[static_cast<unsigned char>(c) / 64] >>
             (static_cast<unsigned char>(c) % 64)) &
            0x1) == 0x1;
  }
  // True iff the set has no members (all bitmap words are zero).
  constexpr bool empty() const {
    for (uint64_t c : m_) {
      if (c != 0) return false;
    }
    return true;
  }
  // The singleton set {x}.
  static constexpr CharSet Char(char x) {
    return CharSet(CharMaskForWord(x, 0), CharMaskForWord(x, 1),
                   CharMaskForWord(x, 2), CharMaskForWord(x, 3));
  }
  // The closed range [lo, hi], with endpoints compared as unsigned chars.
  static constexpr CharSet Range(char lo, char hi) {
    return CharSet(RangeForWord(lo, hi, 0), RangeForWord(lo, hi, 1),
                   RangeForWord(lo, hi, 2), RangeForWord(lo, hi, 3));
  }
  // Set intersection.
  friend constexpr CharSet operator&(const CharSet& a, const CharSet& b) {
    return CharSet(a.m_[0] & b.m_[0], a.m_[1] & b.m_[1], a.m_[2] & b.m_[2],
                   a.m_[3] & b.m_[3]);
  }
  // Set union.
  friend constexpr CharSet operator|(const CharSet& a, const CharSet& b) {
    return CharSet(a.m_[0] | b.m_[0], a.m_[1] | b.m_[1], a.m_[2] | b.m_[2],
                   a.m_[3] | b.m_[3]);
  }
  // Set complement over all 256 characters.
  friend constexpr CharSet operator~(const CharSet& a) {
    return CharSet(~a.m_[0], ~a.m_[1], ~a.m_[2], ~a.m_[3]);
  }
  // Named ASCII character classes, mirroring the <cctype> predicates.
  static constexpr CharSet AsciiUppercase() { return CharSet::Range('A', 'Z'); }
  static constexpr CharSet AsciiLowercase() { return CharSet::Range('a', 'z'); }
  static constexpr CharSet AsciiDigits() { return CharSet::Range('0', '9'); }
  static constexpr CharSet AsciiAlphabet() {
    return AsciiLowercase() | AsciiUppercase();
  }
  static constexpr CharSet AsciiAlphanumerics() {
    return AsciiDigits() | AsciiAlphabet();
  }
  static constexpr CharSet AsciiHexDigits() {
    return AsciiDigits() | CharSet::Range('A', 'F') | CharSet::Range('a', 'f');
  }
  static constexpr CharSet AsciiPrintable() {
    // 0x20 (space) through 0x7e ('~').
    return CharSet::Range(0x20, 0x7e);
  }
  static constexpr CharSet AsciiWhitespace() { return CharSet("\t\n\v\f\r "); }
  static constexpr CharSet AsciiPunctuation() {
    return AsciiPrintable() & ~AsciiWhitespace() & ~AsciiAlphanumerics();
  }

 private:
  // Constructs directly from the four bitmap words.
  constexpr CharSet(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
      : m_{b0, b1, b2, b3} {}
  // Bits of word |word| covering [lo, hi]: bits below hi+1 with bits below lo
  // masked off.
  static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) {
    return OpenRangeFromZeroForWord(static_cast<unsigned char>(hi) + 1, word) &
           ~OpenRangeFromZeroForWord(static_cast<unsigned char>(lo), word);
  }
  // Bits of word |word| covering the half-open range [0, upper).
  static constexpr uint64_t OpenRangeFromZeroForWord(uint64_t upper,
                                                     uint64_t word) {
    return (upper <= 64 * word) ? 0
           : (upper >= 64 * (word + 1))
               ? ~static_cast<uint64_t>(0)
               : (~static_cast<uint64_t>(0) >> (64 - upper % 64));
  }
  // Single-bit mask for |x| within word |word|; zero when x lies in another
  // word.
  static constexpr uint64_t CharMaskForWord(char x, uint64_t word) {
    return (static_cast<unsigned char>(x) / 64 == word)
               ? (static_cast<uint64_t>(1)
                  << (static_cast<unsigned char>(x) % 64))
               : 0;
  }
  // Marks |c| as a member of the set.
  constexpr void SetChar(unsigned char c) {
    m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
  }
  // 256-bit membership bitmap; bit c of the concatenated words is set iff
  // character c is present.
  uint64_t m_[4];
};
}
#endif | #include "absl/strings/charset.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
namespace {
// The full and empty sets, built once for reuse across assertions.
constexpr absl::CharSet everything_map = ~absl::CharSet();
constexpr absl::CharSet nothing_map = absl::CharSet();
TEST(Charmap, AllTests) {
  const absl::CharSet also_nothing_map("");
  EXPECT_TRUE(everything_map.contains('\0'));
  EXPECT_FALSE(nothing_map.contains('\0'));
  EXPECT_FALSE(also_nothing_map.contains('\0'));
  // Exhaustively check the remaining 255 characters.
  for (unsigned char ch = 1; ch != 0; ++ch) {
    SCOPED_TRACE(ch);
    EXPECT_TRUE(everything_map.contains(ch));
    EXPECT_FALSE(nothing_map.contains(ch));
    EXPECT_FALSE(also_nothing_map.contains(ch));
  }
  // Only the first 5 characters of "&@#@^!@?" are used ('@' repeats), so the
  // set has exactly 4 members.
  const absl::CharSet symbols(absl::string_view("&@#@^!@?", 5));
  EXPECT_TRUE(symbols.contains('&'));
  EXPECT_TRUE(symbols.contains('@'));
  EXPECT_TRUE(symbols.contains('#'));
  EXPECT_TRUE(symbols.contains('^'));
  EXPECT_FALSE(symbols.contains('!'));
  EXPECT_FALSE(symbols.contains('?'));
  int cnt = 0;
  for (unsigned char ch = 1; ch != 0; ++ch) cnt += symbols.contains(ch);
  EXPECT_EQ(cnt, 4);
  // Length-delimited construction keeps bytes past an embedded NUL; C-string
  // construction stops at the NUL.
  const absl::CharSet lets(absl::string_view("^abcde", 3));
  const absl::CharSet lets2(absl::string_view("fghij\0klmnop", 10));
  const absl::CharSet lets3("fghij\0klmnop");
  EXPECT_TRUE(lets2.contains('k'));
  EXPECT_FALSE(lets3.contains('k'));
  EXPECT_FALSE((symbols & lets).empty());
  EXPECT_TRUE((lets2 & lets).empty());
  EXPECT_FALSE((lets & symbols).empty());
  EXPECT_TRUE((lets & lets2).empty());
  EXPECT_TRUE(nothing_map.empty());
  EXPECT_FALSE(lets.empty());
}
// Returns a string listing, in ascending code-point order, every character
// contained in |m|.
std::string Members(const absl::CharSet& m) {
  std::string r;
  for (size_t i = 0; i < 256; ++i)
    if (m.contains(i)) r.push_back(i);
  return r;
}
// Returns the string of consecutive characters from |lo| through |hi|
// inclusive. If hi < lo, the increment wraps through 255 to 0 and continues
// until |hi| is reached, matching unsigned-char arithmetic.
std::string ClosedRangeString(unsigned char lo, unsigned char hi) {
  std::string range;
  for (unsigned char c = lo;; ++c) {
    range.push_back(static_cast<char>(c));
    if (c == hi) break;
  }
  return range;
}
// All construction and algebra must be usable in constant expressions.
TEST(Charmap, Constexpr) {
  constexpr absl::CharSet kEmpty = absl::CharSet();
  EXPECT_EQ(Members(kEmpty), "");
  constexpr absl::CharSet kA = absl::CharSet::Char('A');
  EXPECT_EQ(Members(kA), "A");
  constexpr absl::CharSet kAZ = absl::CharSet::Range('A', 'Z');
  EXPECT_EQ(Members(kAZ), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  constexpr absl::CharSet kIdentifier =
      absl::CharSet::Range('0', '9') | absl::CharSet::Range('A', 'Z') |
      absl::CharSet::Range('a', 'z') | absl::CharSet::Char('_');
  EXPECT_EQ(Members(kIdentifier),
            "0123456789"
            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "_"
            "abcdefghijklmnopqrstuvwxyz");
  constexpr absl::CharSet kAll = ~absl::CharSet();
  for (size_t i = 0; i < 256; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(kAll.contains(i));
  }
  constexpr absl::CharSet kHello = absl::CharSet("Hello, world!");
  EXPECT_EQ(Members(kHello), " !,Hdelorw");
  constexpr absl::CharSet kABC =
      absl::CharSet::Range('A', 'Z') & ~absl::CharSet::Range('D', 'Z');
  EXPECT_EQ(Members(kABC), "ABC");
  constexpr bool kContainsA = absl::CharSet("abc").contains('a');
  EXPECT_TRUE(kContainsA);
  constexpr bool kContainsD = absl::CharSet("abc").contains('d');
  EXPECT_FALSE(kContainsD);
  constexpr bool kEmptyIsEmpty = absl::CharSet().empty();
  EXPECT_TRUE(kEmptyIsEmpty);
  constexpr bool kNotEmptyIsEmpty = absl::CharSet("abc").empty();
  EXPECT_FALSE(kNotEmptyIsEmpty);
}
// Ranges are checked over "points of interest" near word boundaries (63/64,
// 127/128, ...), where the bitmap math is most likely to go wrong.
TEST(Charmap, Range) {
  std::vector<size_t> poi = {0,   1,   2,   3,   4,   7,   8,   9,   15,
                             16,  17,  30,  31,  32,  33,  63,  64,  65,
                             127, 128, 129, 223, 224, 225, 254, 255};
  for (auto lo = poi.begin(); lo != poi.end(); ++lo) {
    SCOPED_TRACE(*lo);
    for (auto hi = lo; hi != poi.end(); ++hi) {
      SCOPED_TRACE(*hi);
      EXPECT_EQ(Members(absl::CharSet::Range(*lo, *hi)),
                ClosedRangeString(*lo, *hi));
    }
  }
}
// string_view construction keeps bytes past an embedded NUL.
TEST(Charmap, NullByteWithStringView) {
  char characters[5] = {'a', 'b', '\0', 'd', 'x'};
  absl::string_view view(characters, 5);
  absl::CharSet tester(view);
  EXPECT_TRUE(tester.contains('a'));
  EXPECT_TRUE(tester.contains('b'));
  EXPECT_TRUE(tester.contains('\0'));
  EXPECT_TRUE(tester.contains('d'));
  EXPECT_TRUE(tester.contains('x'));
  EXPECT_FALSE(tester.contains('c'));
}
// Each Ascii* class must agree exactly with the corresponding absl::ascii_*
// predicate over all 256 characters.
TEST(CharmapCtype, Match) {
  for (int c = 0; c < 256; ++c) {
    SCOPED_TRACE(c);
    SCOPED_TRACE(static_cast<char>(c));
    EXPECT_EQ(absl::ascii_isupper(c),
              absl::CharSet::AsciiUppercase().contains(c));
    EXPECT_EQ(absl::ascii_islower(c),
              absl::CharSet::AsciiLowercase().contains(c));
    EXPECT_EQ(absl::ascii_isdigit(c), absl::CharSet::AsciiDigits().contains(c));
    EXPECT_EQ(absl::ascii_isalpha(c),
              absl::CharSet::AsciiAlphabet().contains(c));
    EXPECT_EQ(absl::ascii_isalnum(c),
              absl::CharSet::AsciiAlphanumerics().contains(c));
    EXPECT_EQ(absl::ascii_isxdigit(c),
              absl::CharSet::AsciiHexDigits().contains(c));
    EXPECT_EQ(absl::ascii_isprint(c),
              absl::CharSet::AsciiPrintable().contains(c));
    EXPECT_EQ(absl::ascii_isspace(c),
              absl::CharSet::AsciiWhitespace().contains(c));
    EXPECT_EQ(absl::ascii_ispunct(c),
              absl::CharSet::AsciiPunctuation().contains(c));
  }
}
} | constexpr explicit CharSet(absl::string_view str) : m_() {
for (char c : str) {
SetChar(static_cast<unsigned char>(c));
}
} | TEST(Charmap, AllTests) {
const absl::CharSet also_nothing_map("");
EXPECT_TRUE(everything_map.contains('\0'));
EXPECT_FALSE(nothing_map.contains('\0'));
EXPECT_FALSE(also_nothing_map.contains('\0'));
for (unsigned char ch = 1; ch != 0; ++ch) {
SCOPED_TRACE(ch);
EXPECT_TRUE(everything_map.contains(ch));
EXPECT_FALSE(nothing_map.contains(ch));
EXPECT_FALSE(also_nothing_map.contains(ch));
}
const absl::CharSet symbols(absl::string_view("&@#@^!@?", 5));
EXPECT_TRUE(symbols.contains('&'));
EXPECT_TRUE(symbols.contains('@'));
EXPECT_TRUE(symbols.contains('#'));
EXPECT_TRUE(symbols.contains('^'));
EXPECT_FALSE(symbols.contains('!'));
EXPECT_FALSE(symbols.contains('?'));
int cnt = 0;
for (unsigned char ch = 1; ch != 0; ++ch) cnt += symbols.contains(ch);
EXPECT_EQ(cnt, 4);
const absl::CharSet lets(absl::string_view("^abcde", 3));
const absl::CharSet lets2(absl::string_view("fghij\0klmnop", 10));
const absl::CharSet lets3("fghij\0klmnop");
EXPECT_TRUE(lets2.contains('k'));
EXPECT_FALSE(lets3.contains('k'));
EXPECT_FALSE((symbols & lets).empty());
EXPECT_TRUE((lets2 & lets).empty());
EXPECT_FALSE((lets & symbols).empty());
EXPECT_TRUE((lets & lets2).empty());
EXPECT_TRUE(nothing_map.empty());
EXPECT_FALSE(lets.empty());
}
TEST(Charmap, Constexpr) {
constexpr absl::CharSet kEmpty = absl::CharSet();
EXPECT_EQ(Members(kEmpty), "");
constexpr absl::CharSet kA = absl::CharSet::Char('A');
EXPECT_EQ(Members(kA), "A");
constexpr absl::CharSet kAZ = absl::CharSet::Range('A', 'Z');
EXPECT_EQ(Members(kAZ), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
constexpr absl::CharSet kIdentifier =
absl::CharSet::Range('0', '9') | absl::CharSet::Range('A', 'Z') |
absl::CharSet::Range('a', 'z') | absl::CharSet::Char('_');
EXPECT_EQ(Members(kIdentifier),
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_"
"abcdefghijklmnopqrstuvwxyz");
constexpr absl::CharSet kAll = ~absl::CharSet();
for (size_t i = 0; i < 256; ++i) {
SCOPED_TRACE(i);
EXPECT_TRUE(kAll.contains(i));
}
constexpr absl::CharSet kHello = absl::CharSet("Hello, world!");
EXPECT_EQ(Members(kHello), " !,Hdelorw");
constexpr absl::CharSet kABC =
absl::CharSet::Range('A', 'Z') & ~absl::CharSet::Range('D', 'Z');
EXPECT_EQ(Members(kABC), "ABC");
constexpr bool kContainsA = absl::CharSet("abc").contains('a');
EXPECT_TRUE(kContainsA);
constexpr bool kContainsD = absl::CharSet("abc").contains('d');
EXPECT_FALSE(kContainsD);
constexpr bool kEmptyIsEmpty = absl::CharSet().empty();
EXPECT_TRUE(kEmptyIsEmpty);
constexpr bool kNotEmptyIsEmpty = absl::CharSet("abc").empty();
EXPECT_FALSE(kNotEmptyIsEmpty);
}
TEST(Charmap, NullByteWithStringView) {
char characters[5] = {'a', 'b', '\0', 'd', 'x'};
absl::string_view view(characters, 5);
absl::CharSet tester(view);
EXPECT_TRUE(tester.contains('a'));
EXPECT_TRUE(tester.contains('b'));
EXPECT_TRUE(tester.contains('\0'));
EXPECT_TRUE(tester.contains('d'));
EXPECT_TRUE(tester.contains('x'));
EXPECT_FALSE(tester.contains('c'));
} |
#include "tensorflow/lite/experimental/shlo/ops/sqrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
// Element-wise square-root functor; specialized below for the half-precision
// types.
struct Sqrt {
  template <typename ValueT>
  ValueT operator()(ValueT value) const {
    return std::sqrt(value);
  }
};
// Half-precision types are computed by widening to float, taking the sqrt,
// and narrowing back.
template <>
F16 Sqrt::operator()<F16>(F16 val) const {
  return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sqrt::operator()<BF16>(BF16 val) const {
  return BF16(operator()(static_cast<float>(val)));
}
// Creates a SqrtOp; the op carries no state, so the attributes are unused.
SqrtOp Create(SqrtOp::Attributes) { return SqrtOp{}; }
// Validates the op before evaluation: propagates the input shape to the
// output, accepts only float or per-tensor-quantized tensors, and requires
// input and output to share the same baseline element type.
absl::Status Prepare(SqrtOp& op, const Tensor& input, Tensor& output) {
  SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
      CheckCtx("sqrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSameBaselineType(CheckCtx("sqrt"), input, output));
  return absl::OkStatus();
}
// Applies Sqrt element-wise to |input|, writing into |output|. Per-tensor
// quantized inputs are dequantized, transformed, and re-quantized; float
// inputs are transformed directly.
absl::Status Evaluate(SqrtOp& op, const Tensor& input, Tensor& output) {
  Sqrt sqrt;
  if (input.IsPerTensorQuantized()) {
    DISPATCH_QUANTIZED(
        detail::DequantizeOpQuantizePerTensor,
        input.quantized_per_tensor_element_type().StorageType(),
        input.quantized_per_tensor_element_type().ExpressedType(), sqrt, input,
        output)
  } else if (IsFloatTensor(input)) {
    DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
                   sqrt, input, output);
  }
  // Reached only when neither branch above returned -- presumably the
  // DISPATCH_* macros return on a matching type; confirm against dispatch.h.
  return absl::FailedPreconditionError(
      "stablehlo.sqrt: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
// Each test delegates the SQRT op to the XNNPACK delegate over a randomly
// sized tensor of the given rank (dimensions drawn from [2, 5]).
TEST(Sqrt, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, width, channels})
      .Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, channels})
      .Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_SQRT,
                                               xnnpack_delegate.get());
}
// Same as 4D but with a two-thread delegate configuration.
TEST(Sqrt, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
}
} | #include "tensorflow/lite/experimental/shlo/ops/sqrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sqrt {
template <class T>
T operator()(T v) const {
return std::sqrt(v);
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sqrt, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_SQRT,
xnnpack_delegate.get());
}
TEST(Sqrt, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
} |
#include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
bool IsConvEquivalentToFullyConnected(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 1 &&
attr.weights.shape.h == 1 &&
attr.strides == HW(1, 1) &&
attr.dilations == HW(1, 1) &&
attr.padding.prepended == HW(0, 0) &&
attr.padding.appended == HW(0, 0);
}
class MakeFullyConnectedFromConvolution : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::CONVOLUTION_2D)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& input_shape = inputs[0]->tensor.shape;
if (input_shape.w != 1 || input_shape.h != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& conv_attr = absl::any_cast<const Convolution2DAttributes&>(
node->operation.attributes);
if (!IsConvEquivalentToFullyConnected(conv_attr)) {
return {TransformStatus::SKIPPED, ""};
}
FullyConnectedAttributes fc_attr;
fc_attr.weights = conv_attr.weights;
fc_attr.bias = conv_attr.bias;
node->operation.attributes = fc_attr;
node->operation.type = ToString(OperationType::FULLY_CONNECTED);
return {TransformStatus::APPLIED,
"Replaced convolution with fully connected."};
}
};
}
std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution() {
return absl::make_unique<MakeFullyConnectedFromConvolution>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakeFullyConnected, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes attr0;
attr0.padding.prepended = HW(0, 0);
attr0.padding.appended = HW(0, 0);
attr0.strides = HW(1, 1);
attr0.dilations = HW(1, 1);
attr0.weights.shape = OHWI(16, 1, 1, 8);
attr0.bias.shape = Linear(16);
Convolution2DAttributes attr1;
attr1.padding.prepended = HW(0, 0);
attr1.padding.appended = HW(0, 0);
attr1.strides = HW(4, 4);
attr1.dilations = HW(1, 1);
attr1.weights.shape = OHWI(16, 4, 4, 16);
attr1.bias.shape = Linear(16);
Convolution2DAttributes attr2;
attr2.padding.prepended = HW(0, 0);
attr2.padding.appended = HW(0, 0);
attr2.strides = HW(1, 1);
attr2.dilations = HW(1, 1);
attr2.weights.shape = OHWI(32, 1, 1, 16);
attr2.bias.shape = Linear(32);
auto conv1x1_node0 = graph.NewNode();
conv1x1_node0->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node0->operation.attributes = attr0;
auto conv4x4_node1 = graph.NewNode();
conv4x4_node1->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv4x4_node1->operation.attributes = attr1;
auto conv1x1_node2 = graph.NewNode();
conv1x1_node2->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node2->operation.attributes = attr2;
ASSERT_TRUE(graph.AddConsumer(conv1x1_node0->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv1x1_node2, &output).ok());
output->tensor.shape = BHWC(1, 1, 1, 32);
Value* link1 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv1x1_node0, conv4x4_node1, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
Value* link2 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv4x4_node1, conv1x1_node2, &link2).ok());
link2->tensor.shape = BHWC(1, 1, 1, 16);
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMakeFullyConnectedFromConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("make_fully_connected", transformation.get());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[1]->operation.type);
ASSERT_EQ(ToString(OperationType::FULLY_CONNECTED),
graph.nodes()[2]->operation.type);
auto fc_attr = absl::any_cast<FullyConnectedAttributes>(
graph.nodes()[2]->operation.attributes);
EXPECT_EQ(OHWI(32, 1, 1, 16), fc_attr.weights.shape);
EXPECT_EQ(Linear(32), fc_attr.bias.shape);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
bool IsConvEquivalentToFullyConnected(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 1 &&
attr.weights.shape.h == 1 &&
attr.strides == HW(1, 1) &&
attr.dilations == HW(1, 1) &&
attr.padding.prepended == HW(0, 0) &&
attr.padding.appended == HW(0, 0);
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakeFullyConnected, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes attr0;
attr0.padding.prepended = HW(0, 0);
attr0.padding.appended = HW(0, 0);
attr0.strides = HW(1, 1);
attr0.dilations = HW(1, 1);
attr0.weights.shape = OHWI(16, 1, 1, 8);
attr0.bias.shape = Linear(16);
Convolution2DAttributes attr1;
attr1.padding.prepended = HW(0, 0);
attr1.padding.appended = HW(0, 0);
attr1.strides = HW(4, 4);
attr1.dilations = HW(1, 1);
attr1.weights.shape = OHWI(16, 4, 4, 16);
attr1.bias.shape = Linear(16);
Convolution2DAttributes attr2;
attr2.padding.prepended = HW(0, 0);
attr2.padding.appended = HW(0, 0);
attr2.strides = HW(1, 1);
attr2.dilations = HW(1, 1);
attr2.weights.shape = OHWI(32, 1, 1, 16);
attr2.bias.shape = Linear(32);
auto conv1x1_node0 = graph.NewNode();
conv1x1_node0->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node0->operation.attributes = attr0;
auto conv4x4_node1 = graph.NewNode();
conv4x4_node1->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv4x4_node1->operation.attributes = attr1;
auto conv1x1_node2 = graph.NewNode();
conv1x1_node2->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node2->operation.attributes = attr2;
ASSERT_TRUE(graph.AddConsumer(conv1x1_node0->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv1x1_node2, &output).ok());
output->tensor.shape = BHWC(1, 1, 1, 32);
Value* link1 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv1x1_node0, conv4x4_node1, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
Value* link2 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv4x4_node1, conv1x1_node2, &link2).ok());
link2->tensor.shape = BHWC(1, 1, 1, 16);
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMakeFullyConnectedFromConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("make_fully_connected", transformation.get());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[1]->operation.type);
ASSERT_EQ(ToString(OperationType::FULLY_CONNECTED),
graph.nodes()[2]->operation.type);
auto fc_attr = absl::any_cast<FullyConnectedAttributes>(
graph.nodes()[2]->operation.attributes);
EXPECT_EQ(OHWI(32, 1, 1, 16), fc_attr.weights.shape);
EXPECT_EQ(Linear(32), fc_attr.bias.shape);
} |
#ifndef AROLLA_QEXPR_OPERATORS_BOOL_LOGIC_H_
#define AROLLA_QEXPR_OPERATORS_BOOL_LOGIC_H_
#include <type_traits>
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/ops/dense_ops.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
namespace arolla {
struct LogicalAndOp {
using run_on_missing = std::true_type;
bool operator()(bool lhs, bool rhs) const { return lhs && rhs; }
OptionalValue<bool> operator()(const OptionalValue<bool>& lhs,
const OptionalValue<bool>& rhs) const {
if (lhs.present) {
return !lhs.value ? false : rhs;
} else if (rhs.present) {
return !rhs.value ? false : lhs;
} else {
return OptionalValue<bool>{};
}
}
};
struct LogicalOrOp {
using run_on_missing = std::true_type;
bool operator()(bool lhs, bool rhs) const { return lhs || rhs; }
OptionalValue<bool> operator()(const OptionalValue<bool>& lhs,
const OptionalValue<bool>& rhs) const {
if (lhs.present) {
return lhs.value ? true : rhs;
} else if (rhs.present) {
return rhs.value ? true : lhs;
} else {
return OptionalValue<bool>{};
}
}
};
struct LogicalNotOp {
using run_on_missing = std::true_type;
bool operator()(bool arg) const { return !arg; }
};
struct LogicalIfOp {
using run_on_missing = std::true_type;
template <typename T, typename = std::enable_if_t<is_scalar_type_v<T>>>
const T& operator()(const OptionalValue<bool>& condition, const T& true_value,
const T& false_value, const T& missing_value) const {
if (condition.present) {
return condition.value ? true_value : false_value;
} else {
return missing_value;
}
}
template <typename T>
OptionalValue<T> operator()(const OptionalValue<bool>& condition,
const OptionalValue<T>& true_value,
const OptionalValue<T>& false_value,
const OptionalValue<T>& missing_value) const {
if (condition.present) {
return condition.value ? true_value : false_value;
} else {
return missing_value;
}
}
template <typename T>
DenseArray<T> operator()(EvaluationContext* ctx,
const DenseArray<bool>& condition,
const OptionalValue<T>& true_value,
const OptionalValue<T>& false_value,
const OptionalValue<T>& missing_value) const {
auto fn = [&true_value, &false_value, &missing_value](
OptionalValue<bool> condition) -> OptionalValue<T> {
return LogicalIfOp()(condition, true_value, false_value, missing_value);
};
auto op = CreateDenseOp<DenseOpFlags::kRunOnMissing, decltype(fn), T>(
fn, &ctx->buffer_factory());
return op(condition);
}
template <typename TrueFn, typename FalseFn, typename MissingFn,
std::enable_if_t<std::is_invocable_v<TrueFn> ||
std::is_invocable_v<FalseFn> ||
std::is_invocable_v<MissingFn>,
bool> = true>
auto operator()(const OptionalValue<bool>& condition,
const TrueFn& true_value, const FalseFn& false_value,
const MissingFn& missing_value) const {
auto unwrap = [](const auto& fn) {
if constexpr (std::is_invocable_v<decltype(fn)>) {
return fn();
} else {
return fn;
}
};
return condition.present
? (condition.value ? unwrap(true_value) : unwrap(false_value))
: unwrap(missing_value);
}
};
}
#endif | #include "arolla/qexpr/operators/bool/logic.h"
#include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using OB = OptionalValue<bool>;
using OI = OptionalValue<int64_t>;
constexpr auto NA = std::nullopt;
#define EXPECT_OPERATOR_RESULT_IS(op_name, lhs, rhs, result) \
do { \
EXPECT_THAT(InvokeOperator<OB>(op_name, lhs, rhs), IsOkAndHolds(result)); \
if (lhs.present && rhs.present) { \
EXPECT_THAT(InvokeOperator<bool>(op_name, lhs.value, rhs.value), \
IsOkAndHolds(result.value)); \
} \
} while (false)
class LogicOperatorsTest : public ::testing::Test {
void SetUp() final { ASSERT_OK(InitArolla()); }
};
TEST_F(LogicOperatorsTest, LogicalAnd) {
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{true}, OB{true}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{true}, OB{false}, OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{true}, OB{}, OB{});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{false}, OB{true}, OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{false}, OB{false},
OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{false}, OB{}, OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{}, OB{true}, OB{});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{}, OB{false}, OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_and", OB{}, OB{}, OB{});
EXPECT_THAT(InvokeOperator<DenseArray<bool>>(
"bool.logical_and", CreateDenseArray<bool>({true, false}),
CreateDenseArray<bool>({true, NA})),
IsOkAndHolds(ElementsAre(true, false)));
}
TEST_F(LogicOperatorsTest, LogicalOr) {
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{true}, OB{true}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{true}, OB{false}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{true}, OB{}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{false}, OB{true}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{false}, OB{false}, OB{false});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{false}, OB{}, OB{});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{}, OB{true}, OB{true});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{}, OB{false}, OB{});
EXPECT_OPERATOR_RESULT_IS("bool.logical_or", OB{}, OB{}, OB{});
EXPECT_THAT(InvokeOperator<DenseArray<bool>>(
"bool.logical_or",
CreateDenseArray<bool>(
{true, true, true, false, false, false, NA, NA, NA}),
CreateDenseArray<bool>(
{true, false, NA, true, false, NA, true, false, NA})),
IsOkAndHolds(ElementsAre(true, true, true, true, false, NA, true,
NA, NA)));
}
TEST_F(LogicOperatorsTest, LogicalNot) {
EXPECT_THAT(InvokeOperator<bool>("bool.logical_not", true),
IsOkAndHolds(false));
EXPECT_THAT(InvokeOperator<bool>("bool.logical_not", false),
IsOkAndHolds(true));
EXPECT_THAT(InvokeOperator<OB>("bool.logical_not", OB{}), IsOkAndHolds(OB{}));
}
TEST_F(LogicOperatorsTest, LogicalIf) {
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{true}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(1));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{true}, OI{}, OI{2}, OI{3}),
IsOkAndHolds(std::nullopt));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{false}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(2));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{false}, OI{1}, OI{}, OI{3}),
IsOkAndHolds(std::nullopt));
EXPECT_THAT(InvokeOperator<OI>("bool.logical_if", OB{}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(3));
EXPECT_THAT(InvokeOperator<OI>("bool.logical_if", OB{}, OI{1}, OI{2}, OI{}),
IsOkAndHolds(std::nullopt));
}
TEST_F(LogicOperatorsTest, LogicalIfOnLambdas) {
auto lambda = [](auto x) { return [x]() { return x; }; };
auto no_call_lambda = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return x;
};
};
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{true}, lambda(OI{1}), OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), OI{2}, OI{3}),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), OI{2}, OI{3}),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, no_call_lambda(OI{2}), OI{3}),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, OI{1}, lambda(OI{2}), OI{3}), Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, no_call_lambda(OI{2}), OI{3}),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, OI{2}, no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, OI{1}, OI{2}, no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, OI{2}, lambda(OI{3})), Eq(OI{3}));
EXPECT_THAT(
LogicalIfOp()(OB{true}, lambda(OI{1}), no_call_lambda(OI{2}), OI{3}),
Eq(OI{1}));
EXPECT_THAT(
LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), lambda(OI{2}), OI{3}),
Eq(OI{2}));
EXPECT_THAT(
LogicalIfOp()(OB{}, no_call_lambda(OI{1}), no_call_lambda(OI{2}), OI{3}),
Eq(OI{3}));
EXPECT_THAT(
LogicalIfOp()(OB{true}, lambda(OI{1}), OI{2}, no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), OI{2},
no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), OI{2}, lambda(OI{3})),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(
LogicalIfOp()(OB{false}, OI{1}, lambda(OI{2}), no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, no_call_lambda(OI{2}), lambda(OI{3})),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, lambda(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), no_call_lambda(OI{2}),
lambda(OI{3})),
Eq(OI{3}));
}
TEST_F(LogicOperatorsTest, LogicalIfOnLambdasWithError) {
auto lambda = [](auto x) { return [x]() { return x; }; };
auto lambda_ok = [](auto x) {
return [x]() { return absl::StatusOr<decltype(x)>(x); };
};
auto lambda_fail = [](auto x) {
return [x]() {
return absl::StatusOr<decltype(x)>(absl::UnimplementedError("fake"));
};
};
auto no_call_lambda = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return x;
};
};
auto no_call_lambda_ok = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return absl::StatusOr<decltype(x)>(x);
};
};
auto op = LogicalIfOp();
EXPECT_THAT(op(OB{true}, OI{1}, OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, OI{1}, no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, OI{1}, OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, OI{2}, lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), no_call_lambda(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda(OI{1}), lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda(OI{1}), no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), OI{2}, no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(
op(OB{false}, no_call_lambda(OI{1}), OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda(OI{1}), OI{2}, lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(
op(OB{true}, OI{1}, no_call_lambda_ok(OI{2}), no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, lambda(OI{2}), no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, no_call_lambda_ok(OI{2}), lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda_ok(OI{1}), lambda(OI{2}),
no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), no_call_lambda_ok(OI{2}),
lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_fail(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
EXPECT_THAT(op(OB{false}, no_call_lambda(OI{1}), lambda_fail(OI{2}),
no_call_lambda(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), no_call_lambda(OI{2}),
lambda_fail(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
}
TEST_F(LogicOperatorsTest, LogicalIfDenseArray) {
EXPECT_THAT(InvokeOperator<DenseArray<int64_t>>(
"bool.logical_if",
CreateDenseArray<bool>(
{true, true, false, false, std::nullopt, std::nullopt}),
CreateDenseArray<int64_t>(
{1, std::nullopt, 1, std::nullopt, 1, std::nullopt}),
CreateDenseArray<int64_t>(
{2, std::nullopt, 2, std::nullopt, 2, std::nullopt}),
CreateDenseArray<int64_t>(
{3, std::nullopt, 3, std::nullopt, 3, std::nullopt})),
IsOkAndHolds(ElementsAre(1, std::nullopt, 2, std::nullopt, 3,
std::nullopt)));
}
TEST_F(LogicOperatorsTest, LogicalIfDenseArrayWithScalars) {
EXPECT_THAT(InvokeOperator<DenseArray<int64_t>>(
"bool.logical_if",
CreateDenseArray<bool>({true, false, std::nullopt}), OI{1},
OI{2}, OI{3}),
IsOkAndHolds(ElementsAre(1, 2, 3)));
}
}
} | template <typename T, typename = std::enable_if_t<is_scalar_type_v<T>>>
const T& operator()(const OptionalValue<bool>& condition, const T& true_value,
const T& false_value, const T& missing_value) const {
if (condition.present) {
return condition.value ? true_value : false_value;
} else {
return missing_value;
}
} | TEST_F(LogicOperatorsTest, LogicalIf) {
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{true}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(1));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{true}, OI{}, OI{2}, OI{3}),
IsOkAndHolds(std::nullopt));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{false}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(2));
EXPECT_THAT(
InvokeOperator<OI>("bool.logical_if", OB{false}, OI{1}, OI{}, OI{3}),
IsOkAndHolds(std::nullopt));
EXPECT_THAT(InvokeOperator<OI>("bool.logical_if", OB{}, OI{1}, OI{2}, OI{3}),
IsOkAndHolds(3));
EXPECT_THAT(InvokeOperator<OI>("bool.logical_if", OB{}, OI{1}, OI{2}, OI{}),
IsOkAndHolds(std::nullopt));
}
TEST_F(LogicOperatorsTest, LogicalIfOnLambdas) {
auto lambda = [](auto x) { return [x]() { return x; }; };
auto no_call_lambda = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return x;
};
};
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{true}, lambda(OI{1}), OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), OI{2}, OI{3}),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), OI{2}, OI{3}),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, no_call_lambda(OI{2}), OI{3}),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, OI{1}, lambda(OI{2}), OI{3}), Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, no_call_lambda(OI{2}), OI{3}),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, OI{2}, no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, OI{1}, OI{2}, no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, OI{2}, lambda(OI{3})), Eq(OI{3}));
EXPECT_THAT(
LogicalIfOp()(OB{true}, lambda(OI{1}), no_call_lambda(OI{2}), OI{3}),
Eq(OI{1}));
EXPECT_THAT(
LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), lambda(OI{2}), OI{3}),
Eq(OI{2}));
EXPECT_THAT(
LogicalIfOp()(OB{}, no_call_lambda(OI{1}), no_call_lambda(OI{2}), OI{3}),
Eq(OI{3}));
EXPECT_THAT(
LogicalIfOp()(OB{true}, lambda(OI{1}), OI{2}, no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), OI{2},
no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), OI{2}, lambda(OI{3})),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, OI{1}, no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(
LogicalIfOp()(OB{false}, OI{1}, lambda(OI{2}), no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, OI{1}, no_call_lambda(OI{2}), lambda(OI{3})),
Eq(OI{3}));
EXPECT_THAT(LogicalIfOp()(OB{true}, lambda(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{1}));
EXPECT_THAT(LogicalIfOp()(OB{false}, no_call_lambda(OI{1}), lambda(OI{2}),
no_call_lambda(OI{3})),
Eq(OI{2}));
EXPECT_THAT(LogicalIfOp()(OB{}, no_call_lambda(OI{1}), no_call_lambda(OI{2}),
lambda(OI{3})),
Eq(OI{3}));
}
TEST_F(LogicOperatorsTest, LogicalIfOnLambdasWithError) {
auto lambda = [](auto x) { return [x]() { return x; }; };
auto lambda_ok = [](auto x) {
return [x]() { return absl::StatusOr<decltype(x)>(x); };
};
auto lambda_fail = [](auto x) {
return [x]() {
return absl::StatusOr<decltype(x)>(absl::UnimplementedError("fake"));
};
};
auto no_call_lambda = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return x;
};
};
auto no_call_lambda_ok = [](auto x) {
return [x]() {
LOG(FATAL) << "Lambda shouldn't be called. " << x;
return absl::StatusOr<decltype(x)>(x);
};
};
auto op = LogicalIfOp();
EXPECT_THAT(op(OB{true}, OI{1}, OI{2}, OI{3}), Eq(OI{1}));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), OI{2}, OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, OI{1}, no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, OI{1}, OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, OI{2}, lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), no_call_lambda(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda(OI{1}), lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda(OI{1}), no_call_lambda_ok(OI{2}), OI{3}),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), OI{2}, no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(
op(OB{false}, no_call_lambda(OI{1}), OI{2}, no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda(OI{1}), OI{2}, lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(
op(OB{true}, OI{1}, no_call_lambda_ok(OI{2}), no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, OI{1}, lambda(OI{2}), no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, OI{1}, no_call_lambda_ok(OI{2}), lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_ok(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
IsOkAndHolds(Eq(OI{1})));
EXPECT_THAT(op(OB{false}, no_call_lambda_ok(OI{1}), lambda(OI{2}),
no_call_lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{2})));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), no_call_lambda_ok(OI{2}),
lambda_ok(OI{3})),
IsOkAndHolds(Eq(OI{3})));
EXPECT_THAT(op(OB{true}, lambda_fail(OI{1}), no_call_lambda(OI{2}),
no_call_lambda(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
EXPECT_THAT(op(OB{false}, no_call_lambda(OI{1}), lambda_fail(OI{2}),
no_call_lambda(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
EXPECT_THAT(op(OB{}, no_call_lambda_ok(OI{1}), no_call_lambda(OI{2}),
lambda_fail(OI{3})),
StatusIs(absl::StatusCode::kUnimplemented, "fake"));
} |
#include "eval/compiler/cel_expression_builder_flat_impl.h"
#include <memory>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/base/macros.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/ast.h"
#include "common/native_type.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/public/cel_expression.h"
#include "extensions/protobuf/ast_converters.h"
#include "internal/status_macros.h"
#include "runtime/runtime_issue.h"
namespace google::api::expr::runtime {
using ::cel::Ast;
using ::cel::RuntimeIssue;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::SourceInfo;
// Builds an evaluable CelExpression from a parsed (unchecked) expression.
// `source_info` may be null; when `warnings` is non-null, non-fatal planner
// issues are appended to it.
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
    const Expr* expr, const SourceInfo* source_info,
    std::vector<absl::Status>* warnings) const {
  ABSL_ASSERT(expr != nullptr);
  // Translate the proto representation into the runtime's native AST first.
  CEL_ASSIGN_OR_RETURN(
      std::unique_ptr<Ast> native_ast,
      cel::extensions::CreateAstFromParsedExpr(*expr, source_info));
  return CreateExpressionImpl(std::move(native_ast), warnings);
}
// Convenience overload of the parsed-expression entry point that discards
// planner warnings.
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
    const Expr* expr, const SourceInfo* source_info) const {
  return CreateExpression(expr, source_info, /*warnings=*/nullptr);
}
// Builds an evaluable CelExpression from a type-checked expression. When
// `warnings` is non-null, non-fatal planner issues are appended to it.
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
    const CheckedExpr* checked_expr,
    std::vector<absl::Status>* warnings) const {
  ABSL_ASSERT(checked_expr != nullptr);
  // Translate the proto representation into the runtime's native AST first.
  CEL_ASSIGN_OR_RETURN(
      std::unique_ptr<Ast> native_ast,
      cel::extensions::CreateAstFromCheckedExpr(*checked_expr));
  return CreateExpressionImpl(std::move(native_ast), warnings);
}
// Convenience overload of the checked-expression entry point that discards
// planner warnings.
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
    const CheckedExpr* checked_expr) const {
  return CreateExpression(checked_expr, /*warnings=*/nullptr);
}
// Shared implementation: plans `converted_ast` with the flat expression
// builder and wraps the planned program in the public CelExpression
// interface. Planner issues are converted to absl::Status and appended to
// `warnings` when the caller supplied a non-null vector.
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpressionImpl(
    std::unique_ptr<Ast> converted_ast,
    std::vector<absl::Status>* warnings) const {
  std::vector<RuntimeIssue> issues;
  // Only collect issues when the caller asked for warnings.
  auto* issues_ptr = (warnings != nullptr) ? &issues : nullptr;
  CEL_ASSIGN_OR_RETURN(FlatExpression impl,
                       flat_expr_builder_.CreateExpressionImpl(
                           std::move(converted_ast), issues_ptr));
  if (issues_ptr != nullptr) {
    for (const auto& issue : issues) {
      warnings->push_back(issue.ToStatus());
    }
  }
  // When recursive planning is enabled (max_recursion_depth != 0) and the
  // whole program was collapsed into a single wrapped direct step, return
  // the recursive evaluator wrapper instead of the stack-machine one.
  if (flat_expr_builder_.options().max_recursion_depth != 0 &&
      !impl.subexpressions().empty() &&
      impl.subexpressions().front().size() == 1 &&
      impl.subexpressions().front().front()->GetNativeTypeId() ==
          cel::NativeTypeId::For<WrappedDirectStep>()) {
    return CelExpressionRecursiveImpl::Create(std::move(impl));
  }
  return std::make_unique<CelExpressionFlatImpl>(std::move(impl));
}
} | #include "eval/compiler/cel_expression_builder_flat_impl.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "eval/compiler/constant_folding.h"
#include "eval/compiler/regex_precompilation_optimization.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/portable_cel_function_adapter.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include "eval/public/testing/matchers.h"
#include "extensions/bindings_ext.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/macro.h"
#include "parser/parser.h"
#include "runtime/runtime_options.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace google::api::expr::runtime {
namespace {
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::Macro;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::test::v1::proto3::NestedTestAllTypes;
using ::google::api::expr::test::v1::proto3::TestAllTypes;
using testing::_;
using testing::Contains;
using testing::HasSubstr;
using testing::IsNull;
using testing::NotNull;
using cel::internal::StatusIs;
// An empty Expr proto must be rejected at plan time with InvalidArgument.
TEST(CelExpressionBuilderFlatImplTest, Error) {
  Expr expr;
  SourceInfo source_info;
  CelExpressionBuilderFlatImpl builder;
  absl::Status status =
      builder.CreateExpression(&expr, &source_info).status();
  EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument,
                               HasSubstr("Invalid empty expression")));
}
// Happy path: a parsed expression plans and evaluates ("1 + 2" == 3).
TEST(CelExpressionBuilderFlatImplTest, ParsedExpr) {
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
  CelExpressionBuilderFlatImpl builder;
  ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
                       builder.CreateExpression(&parsed_expr.expr(),
                                                &parsed_expr.source_info()));
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test::IsCelInt64(3));
}
// One parameterized case: a CEL expression and the matcher its evaluation
// result must satisfy.
struct RecursiveTestCase {
  std::string test_name;  // Used as the gtest parameterized test name.
  std::string expr;       // CEL source text to parse and evaluate.
  test::CelValueMatcher matcher;  // Expected evaluation result.
};
// Fixture for the recursive-plan parameterized tests. Provides helpers to
// configure a builder (type provider, enum, builtins, one lazy function)
// and to populate an Activation with the variables/functions referenced by
// the test expressions.
class RecursivePlanTest : public ::testing::TestWithParam<RecursiveTestCase> {
 protected:
  // Registers the generated-pool type provider, a shadowable "TestEnum",
  // the builtin functions, and the lazily bound "LazilyBoundMult".
  absl::Status SetupBuilder(CelExpressionBuilderFlatImpl& builder) {
    builder.GetTypeRegistry()->RegisterTypeProvider(
        std::make_unique<ProtobufDescriptorProvider>(
            google::protobuf::DescriptorPool::generated_pool(),
            google::protobuf::MessageFactory::generated_factory()));
    builder.GetTypeRegistry()->RegisterEnum("TestEnum",
                                            {{"FOO", 1}, {"BAR", 2}});
    CEL_RETURN_IF_ERROR(RegisterBuiltinFunctions(builder.GetRegistry()));
    return builder.GetRegistry()->RegisterLazyFunction(CelFunctionDescriptor(
        "LazilyBoundMult", false,
        {CelValue::Type::kInt64, CelValue::Type::kInt64}));
  }
  // Inserts the variables (ints, strings, a map, a proto message, a
  // shadowed enum value) and binds the lazy multiply function.
  absl::Status SetupActivation(Activation& activation, google::protobuf::Arena* arena) {
    activation.InsertValue("int_1", CelValue::CreateInt64(1));
    activation.InsertValue("string_abc", CelValue::CreateStringView("abc"));
    activation.InsertValue("string_def", CelValue::CreateStringView("def"));
    auto* map = google::protobuf::Arena::Create<CelMapBuilder>(arena);
    CEL_RETURN_IF_ERROR(
        map->Add(CelValue::CreateStringView("a"), CelValue::CreateInt64(1)));
    CEL_RETURN_IF_ERROR(
        map->Add(CelValue::CreateStringView("b"), CelValue::CreateInt64(2)));
    activation.InsertValue("map_var", CelValue::CreateMap(map));
    auto* msg = google::protobuf::Arena::Create<NestedTestAllTypes>(arena);
    msg->mutable_child()->mutable_payload()->set_single_int64(42);
    activation.InsertValue("struct_var",
                           CelProtoWrapper::CreateMessage(msg, arena));
    // Shadows the registered TestEnum.BAR (= 2) with -1.
    activation.InsertValue("TestEnum.BAR", CelValue::CreateInt64(-1));
    CEL_RETURN_IF_ERROR(activation.InsertFunction(
        PortableBinaryFunctionAdapter<int64_t, int64_t, int64_t>::Create(
            "LazilyBoundMult", false,
            [](google::protobuf::Arena*, int64_t lhs, int64_t rhs) -> int64_t {
              return lhs * rhs;
            })));
    return absl::OkStatus();
  }
};
// Parses `cel` with the standard macro set extended by the cel.bind()
// bindings macros.
absl::StatusOr<ParsedExpr> ParseWithBind(absl::string_view cel) {
  // Build the combined macro list exactly once and intentionally leak it
  // (function-local static), mirroring the usual static-initializer idiom.
  static const std::vector<Macro>* kMacros = [] {
    std::vector<Macro> macros = Macro::AllMacros();
    const auto& bindings = cel::extensions::bindings_macros();
    macros.insert(macros.end(), bindings.begin(), bindings.end());
    return new std::vector<Macro>(std::move(macros));
  }();
  return ParseWithMacros(cel, *kMacros, "<input>");
}
// With unbounded recursion the planner must produce the recursive evaluator
// and evaluation must match the expected value.
TEST_P(RecursivePlanTest, ParsedExprRecursiveImpl) {
  const RecursiveTestCase& test_case = GetParam();
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
  cel::RuntimeOptions options;
  options.container = "google.api.expr.test.v1.proto3";
  options.max_recursion_depth = -1;  // Unbounded: always plan recursively.
  CelExpressionBuilderFlatImpl builder(options);
  ASSERT_OK(SetupBuilder(builder));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
                       builder.CreateExpression(&parsed_expr.expr(),
                                                &parsed_expr.source_info()));
  // The plan must be the recursive implementation.
  EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(expression.get()),
              NotNull());
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK(SetupActivation(activation, &arena));
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test_case.matcher);
}
// Same as ParsedExprRecursiveImpl, but with constant folding and regex
// precompilation optimizers attached; the plan must still be recursive and
// produce the same results.
TEST_P(RecursivePlanTest, ParsedExprRecursiveOptimizedImpl) {
  const RecursiveTestCase& test_case = GetParam();
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
  cel::RuntimeOptions options;
  options.container = "google.api.expr.test.v1.proto3";
  google::protobuf::Arena arena;
  options.max_recursion_depth = -1;
  options.enable_comprehension_list_append = true;
  CelExpressionBuilderFlatImpl builder(options);
  ASSERT_OK(SetupBuilder(builder));
  // Constant folding allocates folded values on the test arena.
  builder.flat_expr_builder().AddProgramOptimizer(
      cel::runtime_internal::CreateConstantFoldingOptimizer(
          cel::extensions::ProtoMemoryManagerRef(&arena)));
  builder.flat_expr_builder().AddProgramOptimizer(
      CreateRegexPrecompilationExtension(options.regex_max_program_size));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
                       builder.CreateExpression(&parsed_expr.expr(),
                                                &parsed_expr.source_info()));
  EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
              NotNull());
  Activation activation;
  ASSERT_OK(SetupActivation(activation, &arena));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  EXPECT_THAT(result, test_case.matcher);
}
// Recursive plans must also support Trace(): evaluation through Trace with
// a no-op callback must yield the same results.
TEST_P(RecursivePlanTest, ParsedExprRecursiveTraceSupport) {
  const RecursiveTestCase& test_case = GetParam();
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
  cel::RuntimeOptions options;
  options.container = "google.api.expr.test.v1.proto3";
  google::protobuf::Arena arena;
  // No-op trace callback; only exercises the tracing code path.
  auto cb = [](int64_t id, const CelValue& value, google::protobuf::Arena* arena) {
    return absl::OkStatus();
  };
  options.max_recursion_depth = -1;
  options.enable_recursive_tracing = true;
  CelExpressionBuilderFlatImpl builder(options);
  ASSERT_OK(SetupBuilder(builder));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
                       builder.CreateExpression(&parsed_expr.expr(),
                                                &parsed_expr.source_info()));
  EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
              NotNull());
  Activation activation;
  ASSERT_OK(SetupActivation(activation, &arena));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Trace(activation, &arena, cb));
  EXPECT_THAT(result, test_case.matcher);
}
// With max_recursion_depth == 0 the recursive evaluator must NOT be used,
// and results must still match.
TEST_P(RecursivePlanTest, Disabled) {
  google::protobuf::LinkMessageReflection<TestAllTypes>();
  const RecursiveTestCase& test_case = GetParam();
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
  cel::RuntimeOptions options;
  options.container = "google.api.expr.test.v1.proto3";
  options.max_recursion_depth = 0;  // Recursive planning disabled.
  CelExpressionBuilderFlatImpl builder(options);
  ASSERT_OK(SetupBuilder(builder));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
                       builder.CreateExpression(&parsed_expr.expr(),
                                                &parsed_expr.source_info()));
  EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(expression.get()),
              IsNull());
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK(SetupActivation(activation, &arena));
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test_case.matcher);
}
// The case table exercises each plan-step kind (constants, calls, logical
// ops, ternary, lists, maps, structs, selects, comprehensions, cel.bind,
// shadowable values, lazy functions, and regex matching).
INSTANTIATE_TEST_SUITE_P(
    RecursivePlanTest, RecursivePlanTest,
    testing::ValuesIn(std::vector<RecursiveTestCase>{
        {"constant", "'abc'", test::IsCelString("abc")},
        {"call", "1 + 2", test::IsCelInt64(3)},
        {"nested_call", "1 + 1 + 1 + 1", test::IsCelInt64(4)},
        {"and", "true && false", test::IsCelBool(false)},
        {"or", "true || false", test::IsCelBool(true)},
        {"ternary", "(true || false) ? 2 + 2 : 3 + 3", test::IsCelInt64(4)},
        {"create_list", "3 in [1, 2, 3]", test::IsCelBool(true)},
        {"create_list_complex", "3 in [2 / 2, 4 / 2, 6 / 2]",
         test::IsCelBool(true)},
        {"ident", "int_1 == 1", test::IsCelBool(true)},
        {"ident_complex", "int_1 + 2 > 4 ? string_abc : string_def",
         test::IsCelString("def")},
        {"select", "struct_var.child.payload.single_int64",
         test::IsCelInt64(42)},
        {"nested_select", "[map_var.a, map_var.b].size() == 2",
         test::IsCelBool(true)},
        {"map_index", "map_var['b']", test::IsCelInt64(2)},
        {"list_index", "[1, 2, 3][1]", test::IsCelInt64(2)},
        {"compre_exists", "[1, 2, 3, 4].exists(x, x == 3)",
         test::IsCelBool(true)},
        {"compre_map", "8 in [1, 2, 3, 4].map(x, x * 2)",
         test::IsCelBool(true)},
        {"map_var_compre_exists", "map_var.exists(key, key == 'b')",
         test::IsCelBool(true)},
        {"map_compre_exists", "{'a': 1, 'b': 2}.exists(k, k == 'b')",
         test::IsCelBool(true)},
        {"create_map", "{'a': 42, 'b': 0, 'c': 0}.size()", test::IsCelInt64(3)},
        {"create_struct",
         "NestedTestAllTypes{payload: TestAllTypes{single_int64: "
         "-42}}.payload.single_int64",
         test::IsCelInt64(-42)},
        {"bind", R"(cel.bind(x, "1", x + x + x + x))",
         test::IsCelString("1111")},
        {"nested_bind", R"(cel.bind(x, 20, cel.bind(y, 30, x + y)))",
         test::IsCelInt64(50)},
        {"bind_with_comprehensions",
         R"(cel.bind(x, [1, 2], cel.bind(y, x.map(z, z * 2), y.exists(z, z == 4))))",
         test::IsCelBool(true)},
        {"shadowable_value_default", R"(TestEnum.FOO == 1)",
         test::IsCelBool(true)},
        {"shadowable_value_shadowed", R"(TestEnum.BAR == -1)",
         test::IsCelBool(true)},
        {"lazily_resolved_function", "LazilyBoundMult(123, 2) == 246",
         test::IsCelBool(true)},
        {"re_matches", "matches(string_abc, '[ad][be][cf]')",
         test::IsCelBool(true)},
        {"re_matches_receiver",
         "(string_abc + string_def).matches(r'(123)?' + r'abc' + r'def')",
         test::IsCelBool(true)},
    }),
    // Name each case after its test_name field.
    [](const testing::TestParamInfo<RecursiveTestCase>& info) -> std::string {
      return info.param.test_name;
    });
// With fail_on_warnings off, planning succeeds, the missing "+" overload is
// recorded as a warning, and evaluation yields a CelError.
TEST(CelExpressionBuilderFlatImplTest, ParsedExprWithWarnings) {
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
  cel::RuntimeOptions options;
  options.fail_on_warnings = false;
  CelExpressionBuilderFlatImpl builder(options);
  std::vector<absl::Status> warnings;
  ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<CelExpression> expression,
      builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info(),
                               &warnings));
  // No builtins were registered, so "+" has no overload at plan time.
  EXPECT_THAT(warnings, Contains(StatusIs(absl::StatusCode::kInvalidArgument,
                                          HasSubstr("No overloads"))));
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test::IsCelError(
                          StatusIs(_, HasSubstr("No matching overloads"))));
}
// A type-checked AST goes through the CheckedExpr overload and evaluates.
TEST(CelExpressionBuilderFlatImplTest, CheckedExpr) {
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
  CheckedExpr checked_expr;
  *checked_expr.mutable_expr() = parsed_expr.expr();
  *checked_expr.mutable_source_info() = parsed_expr.source_info();
  CelExpressionBuilderFlatImpl builder;
  ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
                       builder.CreateExpression(&checked_expr));
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test::IsCelInt64(3));
}
// CheckedExpr overload with fail_on_warnings off: the missing overload is
// reported as a warning and evaluation yields a CelError.
TEST(CelExpressionBuilderFlatImplTest, CheckedExprWithWarnings) {
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
  CheckedExpr checked_expr;
  *checked_expr.mutable_expr() = parsed_expr.expr();
  *checked_expr.mutable_source_info() = parsed_expr.source_info();
  cel::RuntimeOptions options;
  options.fail_on_warnings = false;
  CelExpressionBuilderFlatImpl builder(options);
  std::vector<absl::Status> warnings;
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
                       builder.CreateExpression(&checked_expr, &warnings));
  // No builtins were registered, so "+" has no overload at plan time.
  EXPECT_THAT(warnings, Contains(StatusIs(absl::StatusCode::kInvalidArgument,
                                          HasSubstr("No overloads"))));
  google::protobuf::Arena arena;
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue result,
                       expression->Evaluate(activation, &arena));
  EXPECT_THAT(result, test::IsCelError(
                          StatusIs(_, HasSubstr("No matching overloads"))));
}
}
} | absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
const CheckedExpr* checked_expr,
std::vector<absl::Status>* warnings) const {
ABSL_ASSERT(checked_expr != nullptr);
CEL_ASSIGN_OR_RETURN(
std::unique_ptr<Ast> converted_ast,
cel::extensions::CreateAstFromCheckedExpr(*checked_expr));
return CreateExpressionImpl(std::move(converted_ast), warnings);
} | TEST(CelExpressionBuilderFlatImplTest, CheckedExprWithWarnings) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
CheckedExpr checked_expr;
checked_expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
checked_expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
cel::RuntimeOptions options;
options.fail_on_warnings = false;
CelExpressionBuilderFlatImpl builder(options);
std::vector<absl::Status> warnings;
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&checked_expr, &warnings));
EXPECT_THAT(warnings, Contains(StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("No overloads"))));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelError(
StatusIs(_, HasSubstr("No matching overloads"))));
} |
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/future.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel_runner_utils.h"
#include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
using tensorflow::ifrt_serving::IfrtModelContext;
namespace tensorflow {
namespace tf_mlrt {
namespace {
// Returns the total byte size of all tensors referenced by `handle`
// (sum over its dtypes_and_shapes of element-size * element-count).
int64_t GetSizeFromVarHandle(const ResourceHandle& handle) {
  // Accumulate in 64 bits: the function returns int64_t, and a single large
  // variable can exceed INT_MAX bytes, which would overflow the previous
  // `int` accumulator.
  int64_t size = 0;
  for (const auto& dtype_and_shape : handle.dtypes_and_shapes()) {
    size += DataTypeSize(dtype_and_shape.dtype) *
            dtype_and_shape.shape.num_elements();
  }
  return size;
}
// MLRT kernel frame for "tf_mlrt.ifrt_restore_variable".
// Argument layout: [0] checkpoint prefix, [1] tensor names,
// [2] shape-and-slices, [3..] variable handles.
// Attribute layout: [0] restored dtypes, [1] truncate-in-cast flags.
struct MlrtIfrtRestoreVariableKernel : mlrt::KernelFrame {
  using KernelFrame::KernelFrame;
  static constexpr char kName[] = "tf_mlrt.ifrt_restore_variable";
  // Checkpoint prefix (argument 0).
  tensorflow::tfrt_stub::FallbackTensor prefix() const {
    DCHECK_GT(arguments().size(), 3);
    return arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>();
  }
  // Names of the tensors to restore (argument 1).
  tensorflow::tfrt_stub::FallbackTensor tensor_names() const {
    DCHECK_GT(arguments().size(), 3);
    return arguments()[1].Get<tensorflow::tfrt_stub::FallbackTensor>();
  }
  // Shape/slice specs, parallel to tensor_names (argument 2).
  tensorflow::tfrt_stub::FallbackTensor shape_and_slices() const {
    DCHECK_GT(arguments().size(), 3);
    return arguments()[2].Get<tensorflow::tfrt_stub::FallbackTensor>();
  }
  // Dtypes of the tensors as stored in the checkpoint (attribute 0).
  mlrt::bc::Vector<tensorflow::DataType> restored_dtypes() const {
    return attributes().GetAs<mlrt::bc::Vector<tensorflow::DataType>>(0);
  }
  // Whether a narrowing cast may truncate, per tensor (attribute 1).
  mlrt::bc::Vector<bool> truncate_in_cast() const {
    return attributes().GetAs<mlrt::bc::Vector<bool>>(1);
  }
  // Variable handles, one per restored tensor (arguments 3..N-1).
  std::vector<tensorflow::tfrt_stub::FallbackTensor> var_handles() const {
    DCHECK_GT(arguments().size(), 3);
    std::vector<tensorflow::tfrt_stub::FallbackTensor> result;
    result.reserve(arguments().size() - 3);
    for (int i = 3; i < arguments().size(); ++i) {
      result.push_back(
          arguments()[i].Get<tensorflow::tfrt_stub::FallbackTensor>());
    }
    return result;
  }
  Context& context() { return execution_context().GetUserContext<Context>(); }
  void Invoke();
 private:
  // Number of shards the restore work is split into.
  static constexpr int kNumRestoreClusters = 4;
  // Inputs for one RestoreV2 invocation covering a subset of the variables.
  struct RestoreVariableShard {
    tensorflow::Tensor prefix;
    tensorflow::Tensor tensor_names;
    tensorflow::Tensor shape_and_slices;
    std::vector<tensorflow::tfrt_stub::FallbackTensor> var_handles;
    tensorflow::AttrValue dtypes_attr_value;
    std::vector<tensorflow::DataType> restored_dtypes;
    std::vector<bool> truncate_in_cast;
  };
  absl::Status InvokeHelper();
  absl::Status RunShard(RestoreVariableShard shard);
  absl::Status ValidateInput();
};
// Kernel entry point: delegate to InvokeHelper and surface any failure via
// the execution context.
void MlrtIfrtRestoreVariableKernel::Invoke() {
  if (absl::Status status = InvokeHelper(); !status.ok()) {
    execution_context().Fail(std::move(status));
  }
}
// Runs a synchronous tf.Cast of `in_tensor` from `restored_dtype` to
// `cast_dtype` using the fallback OpKernelRunner machinery. Returns the
// cast tensor, or the kernel's error status.
absl::StatusOr<tensorflow::Tensor> Cast(
    tensorflow::Tensor& in_tensor, tensorflow::DataType restored_dtype,
    tensorflow::DataType cast_dtype, bool truncate_in_cast,
    const tensorflow::DeviceMgr& device_manager,
    const tensorflow::ProcessFunctionLibraryRuntime&
        process_function_library_runtime,
    OpKernelContext::Params& params) {
  // Build a Cast kernel with SrcT/DstT/Truncate attributes.
  auto runner =
      tfrt_stub::OpKernelRunner::Create(
          "Cast", "Cast", params.device->name(),
          1,
          [&](tensorflow::AttrValueMap* attr_value_map) {
            tensorflow::AttrValue restored_dtype_attr_value;
            restored_dtype_attr_value.set_type(restored_dtype);
            attr_value_map->insert({"SrcT", restored_dtype_attr_value});
            tensorflow::AttrValue cast_dtype_attr_value;
            cast_dtype_attr_value.set_type(cast_dtype);
            attr_value_map->insert({"DstT", cast_dtype_attr_value});
            tensorflow::AttrValue truncate_attr_value;
            truncate_attr_value.set_b(truncate_in_cast);
            attr_value_map->insert({"Truncate", truncate_attr_value});
            return absl::OkStatus();
          },
          device_manager, process_function_library_runtime)
          .value();
  std::vector<tensorflow::TensorValue> input_tf_tensor_values;
  input_tf_tensor_values.push_back(tensorflow::TensorValue(&in_tensor));
  SetUpParams(runner, input_tf_tensor_values, params);
  OpKernelContext op_kernel_context(&params, 1);
  runner.Run(&op_kernel_context);
  if (!op_kernel_context.status().ok()) {
    return op_kernel_context.status();
  }
  // Cast has exactly one output.
  DCHECK_EQ(op_kernel_context.num_outputs(), 1);
  return *(op_kernel_context.mutable_output(0));
}
// Issues one RestoreV2 invocation for `shard` on the checkpoint-loader
// queue. A promise per variable is registered in the
// IfrtRestoreTensorRegistry up front so consumers can await the restored
// tensors; the promises are fulfilled (or failed) asynchronously once
// RestoreV2 finishes, casting to the handle's dtype when it differs from
// the checkpointed dtype.
absl::Status MlrtIfrtRestoreVariableKernel::RunShard(
    RestoreVariableShard shard) {
  std::optional<IfrtModelContext*> ifrt_model_context =
      context().resource_context().GetResource<IfrtModelContext>(
          "IfrtModelContext");
  if (!ifrt_model_context.has_value()) {
    return absl::FailedPreconditionError(
        "RestoreVariableOp: failed to fetch IfrtModelContext");
  }
  const int num_outputs = shard.var_handles.size();
  DCHECK_EQ(num_outputs, shard.tensor_names.NumElements());
  auto& fallback_request_state = context().fallback_request_state();
  // Build a RestoreV2 kernel whose "dtypes" attribute matches this shard.
  auto runner =
      tfrt_stub::OpKernelRunner::Create(
          "RestoreV2", "RestoreV2",
          context().params().device->name(),
          3,
          [&](tensorflow::AttrValueMap* attr_value_map) {
            attr_value_map->insert({"dtypes", shard.dtypes_attr_value});
            return absl::OkStatus();
          },
          fallback_request_state.device_manager(),
          fallback_request_state.process_function_library_runtime())
          .value();
  // RestoreV2 takes prefix, tensor_names and shape_and_slices as inputs.
  std::vector<tensorflow::TensorValue> input_tf_tensor_values;
  static constexpr int kNumInputArgs = 3;
  input_tf_tensor_values.resize(kNumInputArgs);
  input_tf_tensor_values[0].tensor = &shard.prefix;
  input_tf_tensor_values[1].tensor = &shard.tensor_names;
  input_tf_tensor_values[2].tensor = &shard.shape_and_slices;
  auto& params = context().params();
  SetUpParams(runner, input_tf_tensor_values, params);
  // Restore runs on the host CPU device.
  params.device = context().fallback_request_state().device_manager().HostCPU();
  // State that must outlive this call; owned by the queued task below.
  struct AsyncState {
    explicit AsyncState(
        const std::vector<tensorflow::TensorValue>& input_tf_tensor_values,
        const OpKernelContext::Params& params, int num_outputs,
        const tensorflow::DeviceMgr& device_manager,
        const tensorflow::ProcessFunctionLibraryRuntime&
            process_function_library_runtime)
        : run_state(input_tf_tensor_values, params),
          context(&run_state.params, num_outputs),
          device_manager(device_manager),
          process_function_library_runtime(process_function_library_runtime) {}
    tfrt_stub::OpKernelRunState run_state;
    OpKernelContext context;
    const tensorflow::DeviceMgr& device_manager;
    const tensorflow::ProcessFunctionLibraryRuntime&
        process_function_library_runtime;
    // One promise per variable in the shard, in shard order.
    std::vector<xla::ifrt::Promise<tensorflow::Tensor>> results;
  };
  auto async_state = std::make_unique<AsyncState>(
      input_tf_tensor_values, params, num_outputs,
      fallback_request_state.device_manager(),
      fallback_request_state.process_function_library_runtime());
  ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =
      (*ifrt_model_context)->GetRestoreTensorRegistry();
  // Register a future per variable before the restore is queued, so lookups
  // can start awaiting immediately.
  for (int i = 0; i < num_outputs; ++i) {
    auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
    auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
    const ResourceHandle& var_handle =
        shard.var_handles[i].tensor().scalar<tensorflow::ResourceHandle>()();
    TF_ASSIGN_OR_RETURN(ifrt_serving::DtypeAndShape dtype_and_shape,
                        ifrt_serving::GetDtypeAndShape(var_handle));
    std::string runtime_name =
        ifrt_serving::GetRuntimeNameFromVarHandle(var_handle);
    ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
        restored_tensor_info = {false, std::move(dtype_and_shape),
                                std::move(future)};
    if (auto status = ifrt_restore_tensor_registry.TryRegister(
            runtime_name, restored_tensor_info);
        !status.ok()) {
      // Propagate the registration failure to any already-created promises.
      for (auto& result : async_state->results) {
        std::move(result).Set(status);
      };
      return status;
    }
    async_state->results.push_back(std::move(promise));
  }
  DCHECK((*ifrt_model_context)->checkpoint_loader_queue() != nullptr);
  // Run the restore asynchronously; the task owns runner/state/shard.
  (*ifrt_model_context)
      ->checkpoint_loader_queue()
      ->AddTask([runner = std::move(runner),
                 async_state = std::move(async_state),
                 shard = std::move(shard)]() {
        auto* op_kernel_context_ptr = &async_state->context;
        runner.Run(op_kernel_context_ptr);
        auto& op_kernel_context = async_state->context;
        if (!op_kernel_context.status().ok()) {
          // Fail every promise in the shard on kernel error.
          for (auto& result : async_state->results) {
            std::move(result).Set(op_kernel_context.status());
          }
          return;
        }
        DCHECK_EQ(shard.var_handles.size(), op_kernel_context.num_outputs());
        DCHECK_EQ(shard.truncate_in_cast.size(),
                  op_kernel_context.num_outputs());
        for (int i = 0; i < op_kernel_context.num_outputs(); ++i) {
          DCHECK(op_kernel_context.mutable_output(i));
          if (op_kernel_context.mutable_output(i)->dtype() !=
              shard.restored_dtypes[i]) {
            std::move(async_state->results[i])
                .Set(absl::InvalidArgumentError(absl::StrCat(
                    "The restored tensor has a different dtype than the "
                    "variable handle: ",
                    op_kernel_context.mutable_output(i)->dtype(), " vs. ",
                    shard.restored_dtypes[i])));
            return;
          }
          const ResourceHandle& var_handle =
              shard.var_handles[i]
                  .tensor()
                  .scalar<tensorflow::ResourceHandle>()();
          if (shard.restored_dtypes[i] ==
              var_handle.dtypes_and_shapes()[0].dtype) {
            // Dtype matches the handle: hand the tensor over directly.
            std::move(async_state->results[i])
                .Set(*std::move(op_kernel_context.mutable_output(i)));
          } else {
            // Otherwise cast to the handle's dtype before fulfilling.
            absl::StatusOr<tensorflow::Tensor> cast_output = Cast(
                *op_kernel_context.mutable_output(i), shard.restored_dtypes[i],
                var_handle.dtypes_and_shapes()[0].dtype,
                shard.truncate_in_cast[i], async_state->device_manager,
                async_state->process_function_library_runtime,
                async_state->run_state.params);
            if (!cast_output.ok()) {
              std::move(async_state->results[i]).Set(cast_output.status());
            } else {
              std::move(async_state->results[i]).Set(*std::move(cast_output));
            }
          }
        }
      });
  return absl::OkStatus();
}
// Checks the shapes and mutual sizes of all kernel inputs/attributes before
// any restore work is attempted.
absl::Status MlrtIfrtRestoreVariableKernel::ValidateInput() {
  // Cache the argument tensors once; the accessors return by value.
  const tensorflow::tfrt_stub::FallbackTensor prefix_tensor = prefix();
  const tensorflow::tfrt_stub::FallbackTensor names = tensor_names();
  const tensorflow::tfrt_stub::FallbackTensor slices = shape_and_slices();
  if (prefix_tensor.tensor().NumElements() != 1) {
    return absl::InvalidArgumentError(
        "The prefix tensor must be a scalar tensor.");
  }
  if (!TensorShapeUtils::IsVector(names.tensor().shape()) ||
      !TensorShapeUtils::IsVector(slices.tensor().shape())) {
    return absl::InvalidArgumentError(
        absl::StrCat("Input tensor_names and shape_and_slices "
                     "should be an 1-D tensors, got ",
                     names.tensor().shape().DebugString(), " and ",
                     slices.tensor().shape().DebugString()));
  }
  // All per-variable inputs must be parallel to tensor_names.
  const int64_t num_names = names.tensor().NumElements();
  if (num_names != slices.tensor().NumElements()) {
    return absl::InvalidArgumentError(
        "The tensor_names and shape_and_slices tensors must have the same "
        "number of elements.");
  }
  if (num_names != var_handles().size()) {
    return absl::InvalidArgumentError(
        "The tensor_names and var_handles must have the same number of "
        "elements.");
  }
  if (num_names != restored_dtypes().size()) {
    return absl::InvalidArgumentError(
        "The tensor_names and restored_dtypes must have the same number of "
        "elements.");
  }
  if (num_names != truncate_in_cast().size()) {
    return absl::InvalidArgumentError(
        "The tensor_names and truncate_in_cast must have the same number of "
        "elements.");
  }
  return absl::OkStatus();
}
// Validates the inputs, partitions the variables into size-balanced shards,
// and launches one RestoreV2 per shard via RunShard.
absl::Status MlrtIfrtRestoreVariableKernel::InvokeHelper() {
  TF_RETURN_IF_ERROR(ValidateInput());
  // Per-variable byte sizes drive the shard balancing below.
  std::vector<int64_t> variable_sizes;
  variable_sizes.reserve(var_handles().size());
  for (auto& handle : var_handles()) {
    variable_sizes.push_back(GetSizeFromVarHandle(
        handle.tensor().scalar<tensorflow::ResourceHandle>()()));
  }
  std::vector<std::vector<int>> sharded_indices =
      ShardVariables(kNumRestoreClusters, absl::MakeSpan(variable_sizes));
  // Packs a list of strings into a 1-D DT_STRING tensor.
  auto vector_to_tensor = [](const std::vector<tsl::tstring>& vec) {
    tensorflow::Tensor tensor(tensorflow::DT_STRING,
                              TensorShape({static_cast<int>(vec.size())}));
    for (int i = 0; i < vec.size(); ++i) {
      tensor.flat<tsl::tstring>()(i) = vec[i];
    }
    return tensor;
  };
  const auto& tensor_names_flat = tensor_names().tensor().flat<tsl::tstring>();
  const auto& shape_and_slices_flat =
      shape_and_slices().tensor().flat<tsl::tstring>();
  // Materialize each shard's inputs from the per-variable indices.
  std::vector<RestoreVariableShard> shards;
  shards.reserve(sharded_indices.size());
  for (auto& sharded_index : sharded_indices) {
    RestoreVariableShard shard;
    shard.var_handles.reserve(sharded_index.size());
    shard.truncate_in_cast.reserve(sharded_index.size());
    shard.restored_dtypes.reserve(sharded_index.size());
    std::vector<tsl::tstring> tensor_names;
    std::vector<tsl::tstring> shape_and_slices;
    shape_and_slices.reserve(sharded_index.size());
    tensor_names.reserve(sharded_index.size());
    for (int index : sharded_index) {
      tensor_names.push_back(tensor_names_flat(index));
      shape_and_slices.push_back(shape_and_slices_flat(index));
      shard.dtypes_attr_value.mutable_list()->add_type(
          restored_dtypes()[index]);
      shard.var_handles.push_back(var_handles()[index]);
      shard.restored_dtypes.push_back(restored_dtypes()[index]);
      shard.truncate_in_cast.push_back(truncate_in_cast()[index]);
    }
    // All shards read from the same checkpoint prefix.
    shard.prefix = prefix().tensor();
    shard.tensor_names = vector_to_tensor(tensor_names);
    shard.shape_and_slices = vector_to_tensor(shape_and_slices);
    shards.push_back(std::move(shard));
  }
  for (const auto& shard : shards) {
    TF_RETURN_IF_ERROR(RunShard(shard));
  }
  return absl::OkStatus();
}
// MLRT kernel "tf_mlrt.ifrt_load_variable": resolves a restored variable by
// its resource handle and produces (1) the variable's runtime-name key tensor
// and (2) a future to the host-side tensor (empty when not used by host).
class MlrtIfrtLoadVariableKernel : public mlrt::KernelFrame {
 public:
  using KernelFrame::KernelFrame;

  static constexpr char kName[] = "tf_mlrt.ifrt_load_variable";

  // Argument 0: scalar tensor holding the variable's ResourceHandle.
  const tensorflow::Tensor& variable_handler_tensor() const {
    DCHECK_GE(arguments().size(), 1);
    const tensorflow::Tensor& ret =
        arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
    DCHECK_EQ(ret.NumElements(), 1);
    return ret;
  }

  // Attribute 0: whether host-side code consumes the loaded tensor.
  bool used_by_host() const {
    DCHECK_EQ(attributes().size(), 1);
    return attributes().GetAs<bool>(0);
  }

  Context& context() { return execution_context().GetUserContext<Context>(); }

  void Invoke();

 private:
  absl::Status InvokeHelper();
};
void MlrtIfrtLoadVariableKernel::Invoke() {
absl::Status status = InvokeHelper();
if (!status.ok()) {
execution_context().Fail(std::move(status));
return;
}
}
// Loads a variable for IFRT serving.
//
// Outputs:
//   results()[0]: scalar DT_STRING tensor holding the variable's runtime name
//                 (the key into the IFRT restore-tensor registry).
//   results()[1]: future resolving to the host FallbackTensor; an empty
//                 placeholder tensor when `used_by_host()` is false.
//
// When the variable is registered in the IfrtRestoreTensorRegistry the
// restored tensor is forwarded; otherwise we fall back to the host CPU
// ResourceManager.
absl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() {
  DCHECK_EQ(2, results().size());
  std::optional<IfrtModelContext*> ifrt_model_context =
      context().resource_context().GetResource<IfrtModelContext>(
          "IfrtModelContext");
  if (!ifrt_model_context.has_value()) {
    return absl::FailedPreconditionError(
        "LoadVariableOp: failed to fetch IfrtModelContext: ");
  }
  auto tensor_promise =
      mlrt::Promise::Allocate<tensorflow::tfrt_stub::FallbackTensor>();
  auto tensor_future = tensor_promise.GetFuture();
  ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =
      (*ifrt_model_context)->GetRestoreTensorRegistry();
  auto& resource_handle = variable_handler_tensor().scalar<ResourceHandle>()();
  std::string runtime_name =
      ifrt_serving::GetRuntimeNameFromVarHandle(resource_handle);
  if (used_by_host()) {
    if (ifrt_restore_tensor_registry.SetUsedByHost(runtime_name).ok()) {
      // The variable was restored through the IFRT registry: forward the
      // restored tensor to the promise once it becomes ready.
      xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
          ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
      restored_tensor_future.OnReady(
          [tensor_promise = std::move(tensor_promise)](
              absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
            if (!restored_tensor.ok()) {
              std::move(tensor_promise).SetError(restored_tensor.status());
              return;
            }
            std::move(tensor_promise)
                .Set<tensorflow::tfrt_stub::FallbackTensor>(
                    tensorflow::tfrt_stub::FallbackTensor(*restored_tensor));
          });
    } else {
      // Not registered with IFRT: look the variable up in the host CPU
      // resource manager instead.
      auto resource_manager = context()
                                  .fallback_request_state()
                                  .device_manager()
                                  .HostCPU()
                                  ->resource_manager();
      DCHECK(resource_manager);
      Var* variable;
      TF_RETURN_IF_ERROR(resource_manager->Lookup(
          resource_handle.container(), resource_handle.name(), &variable));
      // Lookup() returns an owned reference; release it on scope exit so the
      // Var's refcount is not leaked.
      core::ScopedUnref unref_variable(variable);
      if (tensorflow::Tensor* t = variable->tensor(); t != nullptr) {
        std::move(tensor_promise)
            .Set<tensorflow::tfrt_stub::FallbackTensor>(
                tensorflow::tfrt_stub::FallbackTensor(*t));
      } else {
        std::move(tensor_promise)
            .SetError(absl::InternalError(
                absl::StrCat("Variable ", resource_handle.name(),
                             " is not found in either "
                             "IfrtRestoreTensorRegistry or ResourceManager")));
      }
    }
  } else {
    // Host does not consume the tensor; fulfill with an empty placeholder so
    // consumers of the future are unblocked.
    std::move(tensor_promise)
        .Set<tensorflow::tfrt_stub::FallbackTensor>(
            tensorflow::tfrt_stub::FallbackTensor());
  }
  tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
  key_tensor.scalar<tsl::tstring>()() = runtime_name;
  results()[0].Set(tensorflow::tfrt_stub::FallbackTensor(key_tensor));
  results()[1].Set(std::move(tensor_future));
  return absl::OkStatus();
}
// Registers the optional IFRT kernels with the given MLRT kernel registry.
// Registration is keyed by kernel name, so order is immaterial.
void RegisterTfMlrtIfrtKernels(mlrt::KernelRegistry& registry) {
  registry.Register<MlrtIfrtRestoreVariableKernel>();
  registry.Register<MlrtIfrtLoadVariableKernel>();
}
}
// Force-registers the IFRT kernels at static-initialization time; the bool
// value itself is never read.
const bool kUnused = [] {
  RegisterTfMlrtIfrtKernels(GetTfMlrtOptionalKernelRegistry());
  return true;
}();
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
using tensorflow::test::AsScalar;
using tensorflow::test::AsTensor;
using tensorflow::test::ExpectEqual;
using tensorflow::test::TensorEq;

// Container / shared-name used when creating test variables. The runtime
// name below matches how the kernels derive it from these two values —
// keep all three in sync.
constexpr absl::string_view kContainer = "test";
constexpr absl::string_view kSharedName = "y";
constexpr absl::string_view kVariableRuntimeName = "test__y";
// Returns a lazily constructed, process-wide thread pool used for IFRT
// sharding work. Intentionally leaked (never destroyed).
tsl::thread::ThreadPool& GetThreadPool() {
  static constexpr int kMaxParallelism = 16;
  static auto* const pool = new tsl::thread::ThreadPool(
      tsl::Env::Default(), tsl::ThreadOptions(), "IfrtSharding",
      kMaxParallelism);
  return *pool;
}
// Serializes an MLRT bytecode vector of `num_outputs` DT_INT32 dtypes and
// returns it as a string.
std::string EncodeRestoreDtypesInt32(int num_outputs) {
  mlrt::bc::Buffer buffer;
  mlrt::bc::Allocator allocator(&buffer);
  auto dtypes_ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>(
      &allocator, num_outputs);
  for (int slot = 0; slot < num_outputs; ++slot) {
    dtypes_ctor.ConstructAt(slot, tensorflow::DT_INT32);
  }
  return std::string(buffer.data(), buffer.size());
}
// Serializes an MLRT bytecode vector of `num_outputs` `false` flags (one
// truncate-in-cast flag per restored variable) and returns it as a string.
std::string EncodeTruncateInCast(int num_outputs) {
  mlrt::bc::Buffer buffer;
  mlrt::bc::Allocator allocator(&buffer);
  auto flags_ctor = mlrt::bc::New<mlrt::bc::Vector<bool>>(&allocator,
                                                          num_outputs);
  for (int slot = 0; slot < num_outputs; ++slot) {
    flags_ctor.ConstructAt(slot, false);
  }
  return std::string(buffer.data(), buffer.size());
}
// Builds an MLRT bytecode executable whose "main" function creates
// `num_variables` VarHandleOp kernels (via tf_mlrt.createop + tf_mlrt.executeop
// pairs) and then restores all of them with a single
// tf_mlrt.ifrt_restore_variable call.
// "main" inputs: prefix_tensor, name_tensor, slice_tensor.
mlrt::bc::Buffer CreateExecutableForIfrtRestoreVariableOp(
    int num_variables = 1) {
  mlrt::bc::Buffer buffer;
  mlrt::bc::Allocator allocator(&buffer);
  auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
  mlrt::testing::SymbolTable kernels;
  std::vector<std::string> kernel_names = {
      "tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_restore_variable",
      "return"};
  executable_ctor.construct_kernel_names(kernel_names.size())
      .Assign(kernel_names);
  kernels.Def(kernel_names);
  // Attribute count for one variable; each additional variable contributes
  // one node-def attribute and one key attribute (hence the + 2 below).
  static constexpr int kNumAttributes = 5;
  mlrt::testing::AttributeTable attributes(executable_ctor.construct_attributes(
      kNumAttributes + 2 * (num_variables - 1)));
  std::string restore_dtypes = EncodeRestoreDtypesInt32(num_variables);
  attributes.Add("restore_dtypes", restore_dtypes);
  std::vector<bool> truncate_in_cast(num_variables, false);
  attributes.Add("truncate_in_cast", EncodeTruncateInCast(num_variables));
  // One VarHandleOp NodeDef per variable; shared_name is suffixed with the
  // variable index so each handle is distinct.
  for (int i = 0; i < num_variables; ++i) {
    attributes.Add(
        absl::StrCat("var_handle_op_node_def", i),
        absl::Substitute(
            R"pb(name: "$0"
                 op: "VarHandleOp"
                 device: "/job:localhost/replica:0/task:0/device:CPU:0"
                 attr {
                   key: "container"
                   value { s: "$1" }
                 }
                 attr {
                   key: "shared_name"
                   value { s: "$2" }
                 }
                 attr {
                   key: "dtype"
                   value { type: DT_INT16 }
                 }
                 attr {
                   key: "shape"
                   value { shape { dim { size: 3 } } }
                 }
            )pb",
            absl::StrCat("VarHandleOp", i), kContainer,
            absl::StrCat(kSharedName, i)));
    attributes.Add(absl::StrCat("var_handle_op_key", i), i);
  }
  auto functions_ctor = executable_ctor.construct_functions(1);
  {
    auto function_ctor = functions_ctor.ConstructAt(0);
    function_ctor.construct_name("main");
    mlrt::testing::SymbolTable regs;
    function_ctor.construct_input_regs(3).Assign(
        regs.Def({"prefix_tensor", "name_tensor", "slice_tensor"}));
    // Kernel count for one variable; each extra variable adds one
    // createop + one executeop.
    const int kNumKernels = 4;
    auto kernels_ctor =
        function_ctor.construct_kernels(kNumKernels + 2 * (num_variables - 1));
    int kernel_index = 0;
    std::vector<std::string> variable_handle_names;
    variable_handle_names.reserve(num_variables);
    for (int i = 0; i < num_variables; ++i) {
      variable_handle_names.push_back(absl::StrCat("variable_handle", i));
      std::string variable_handle_op_node_def =
          absl::StrCat("var_handle_op_node_def", i);
      std::string variable_handle_op_key = absl::StrCat("var_handle_op_key", i);
      {
        // createop: instantiate the i-th VarHandleOp kernel.
        auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
        createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
        createop_ctor.construct_arguments(0);
        createop_ctor.construct_results(0);
        createop_ctor.construct_attributes(2).Assign(
            {attributes.GetHandle(variable_handle_op_node_def),
             attributes.GetHandle(variable_handle_op_key)});
        kernel_index++;
      }
      {
        // executeop: run the VarHandleOp to produce the handle register.
        auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
        executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
        executeop_ctor.construct_arguments(0);
        executeop_ctor.construct_results(1).Assign(
            {regs.Def(variable_handle_names.back())});
        executeop_ctor.construct_attributes(2).Assign(
            {attributes.GetHandle(variable_handle_op_node_def),
             attributes.GetHandle(variable_handle_op_key)});
        executeop_ctor.construct_last_uses(1).Assign({0});
        kernel_index++;
      }
    }
    {
      // ifrt_restore_variable: consumes prefix/name/slice plus every handle.
      std::vector<std::string> args;
      args.reserve(3 + num_variables);
      args.push_back("prefix_tensor");
      args.push_back("name_tensor");
      args.push_back("slice_tensor");
      for (int i = 0; i < num_variables; ++i) {
        args.push_back(variable_handle_names[i]);
      }
      auto restore_ctor = kernels_ctor.ConstructAt(kernel_index);
      restore_ctor.set_code(kernels.Use("tf_mlrt.ifrt_restore_variable"));
      restore_ctor.construct_arguments(args.size()).Assign(regs.Use(args));
      restore_ctor.construct_results(0);
      restore_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle("restore_dtypes"),
           attributes.GetHandle("truncate_in_cast")});
      kernel_index++;
    }
    {
      auto return_ctor = kernels_ctor.ConstructAt(kernel_index);
      return_ctor.set_code(kernels.Use("return"));
      return_ctor.construct_arguments(0);
      kernel_index++;
    }
    function_ctor.set_num_regs(regs.size());
  }
  return buffer;
}
// Builds an MLRT bytecode executable whose "main" creates one VarHandleOp and
// then issues tf_mlrt.ifrt_load_variable on the handle. When
// `redundant_ifrt_load_variable_op` is true, a second load of the same handle
// is appended (to exercise duplicate loads). `used_by_host` is forwarded as
// the kernel's single attribute.
// "main" outputs: (output_tensor, output_future).
mlrt::bc::Buffer CreateExecutableForIfrtLoadVariableOp(
    bool redundant_ifrt_load_variable_op = false, bool used_by_host = false) {
  mlrt::bc::Buffer buffer;
  mlrt::bc::Allocator allocator(&buffer);
  auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
  mlrt::testing::SymbolTable kernels;
  std::vector<std::string> kernel_names = {
      "tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_load_variable",
      "return"};
  executable_ctor.construct_kernel_names(kernel_names.size())
      .Assign(kernel_names);
  kernels.Def(kernel_names);
  mlrt::testing::AttributeTable attributes(
      executable_ctor.construct_attributes(3));
  attributes.Add("var_handle_op_node_def",
                 absl::Substitute(
                     R"pb(name: "VarHandleOp"
                          op: "VarHandleOp"
                          device: "/job:localhost/replica:0/task:0/device:CPU:0"
                          attr {
                            key: "container"
                            value { s: "$0" }
                          }
                          attr {
                            key: "shared_name"
                            value { s: "$1" }
                          }
                          attr {
                            key: "dtype"
                            value { type: DT_INT32 }
                          }
                          attr {
                            key: "shape"
                            value { shape { dim { size: 1 } } }
                          }
                     )pb",
                     kContainer, kSharedName));
  attributes.Add("var_handle_op_key", 0);
  attributes.Add("used_by_host", used_by_host);
  auto functions_ctor = executable_ctor.construct_functions(1);
  {
    auto function_ctor = functions_ctor.ConstructAt(0);
    function_ctor.construct_name("main");
    mlrt::testing::SymbolTable regs;
    function_ctor.construct_output_regs(2).Assign(
        {regs.Def("output_tensor"), regs.Def("output_future")});
    const int kNumKernels = 4 + (redundant_ifrt_load_variable_op ? 1 : 0);
    auto kernels_ctor = function_ctor.construct_kernels(kNumKernels);
    int kernel_index = 0;
    {
      // createop: instantiate the VarHandleOp kernel.
      auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
      createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
      createop_ctor.construct_arguments(0);
      createop_ctor.construct_results(0);
      createop_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle("var_handle_op_node_def"),
           attributes.GetHandle("var_handle_op_key")});
      kernel_index++;
    }
    {
      // executeop: run VarHandleOp to produce the handle register.
      auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
      executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
      executeop_ctor.construct_arguments(0);
      executeop_ctor.construct_results(1).Assign({regs.Def("variable_handle")});
      executeop_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle("var_handle_op_node_def"),
           attributes.GetHandle("var_handle_op_key")});
      kernel_index++;
    }
    {
      // First (and possibly only) load of the variable. The handle is marked
      // last-use only when no redundant load follows.
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
      kernel_ctor.construct_results(2).Assign(
          {regs.Use("output_tensor"), regs.Use("output_future")});
      kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
      kernel_ctor.construct_attributes(1).Assign(
          {attributes.GetHandle("used_by_host")});
      kernel_ctor.construct_last_uses(1).Assign(
          {redundant_ifrt_load_variable_op ? 0 : 1});
      kernel_index++;
    }
    if (redundant_ifrt_load_variable_op) {
      // Second load of the same handle; results go to dummy registers.
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
      kernel_ctor.construct_results(2).Assign(
          {regs.Def("dummy"), regs.Def("dummy_future2")});
      kernel_ctor.construct_attributes(1).Assign(
          {attributes.GetHandle("used_by_host")});
      kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
      kernel_ctor.construct_last_uses(1).Assign({1});
      kernel_index++;
    }
    {
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("return"));
      kernel_ctor.construct_arguments(2).Assign(
          {regs.Use("output_tensor"), regs.Use("output_future")});
      kernel_index++;
    }
    DCHECK_EQ(kernel_index, kNumKernels);
    function_ctor.set_num_regs(regs.size());
  }
  return buffer;
}
// Test fixture wiring up the MLRT kernel registry, fallback state, and an
// IfrtModelContext resource so individual tests can execute bytecode
// functions through mlrt::Execute.
class KernelTest : public ::testing::Test {
 protected:
  void SetUp() override {
    mlrt::RegisterBuiltinKernels(registry_);
    RegisterTfMlrtKernels(registry_);
    execution_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
        4, 4);
    restore_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
        4, 4);
    TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
                                                 session_options_, fdef_lib_));
    runner_ = [](const std::function<void()>& f) { f(); };
    fallback_request_state_ =
        std::make_unique<tfd::KernelFallbackCompatRequestState>(
            &runner_, &fallback_state_->device_manager(), 0,
            &runner_table_, &resource_array_,
            nullptr,
            std::nullopt,
            &fallback_state_->process_function_library_runtime());
    TF_ASSERT_OK_AND_ASSIGN(client_, xla::ifrt::test_util::GetClient());
    // Construct the serving-device and core selectors BEFORE handing the
    // core selector to IfrtModelContext. The previous ordering built them
    // after CreateResource, so the model context received a null selector.
    serving_device_selector_ =
        std::make_unique<tsl::test_util::MockServingDeviceSelector>();
    ifrt_core_selector_ =
        std::make_unique<ifrt_serving::IfrtServingCoreSelector>(
            serving_device_selector_.get(),
            client_->addressable_device_count());
    resource_context_
        .CreateResource<tensorflow::ifrt_serving::IfrtModelContext>(
            "IfrtModelContext", client_, ifrt_core_selector_.get(),
            &GetThreadPool(), nullptr);
    tf_context_ = std::make_unique<Context>(fallback_request_state_.get(),
                                            &resource_context_);
    ifrt_model_context_ =
        resource_context_
            .GetResource<tensorflow::ifrt_serving::IfrtModelContext>(
                "IfrtModelContext")
            .value();
    ifrt_model_context_->set_checkpoint_loader_queue(restore_work_queue_.get());
  }

  std::unique_ptr<tsl::test_util::MockServingDeviceSelector>
      serving_device_selector_;
  std::unique_ptr<ifrt_serving::IfrtServingCoreSelector> ifrt_core_selector_;
  mlrt::KernelRegistry registry_;
  std::unique_ptr<tfrt::ConcurrentWorkQueue> execution_work_queue_;
  std::unique_ptr<tfrt::ConcurrentWorkQueue> restore_work_queue_;
  tensorflow::SessionOptions session_options_;
  tensorflow::FunctionDefLibrary fdef_lib_;
  std::function<void(std::function<void()>)> runner_;
  tfrt_stub::OpKernelRunnerTable runner_table_;
  tfd::FallbackResourceArray resource_array_;
  std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
  tfrt::ResourceContext resource_context_;
  std::shared_ptr<xla::ifrt::Client> client_;
  std::unique_ptr<tfd::KernelFallbackCompatRequestState>
      fallback_request_state_;
  std::unique_ptr<Context> tf_context_;
  // Non-owning; the resource context owns the model context.
  tensorflow::ifrt_serving::IfrtModelContext* ifrt_model_context_;
};
// When the variable is absent from the IFRT restore registry but present in
// the host ResourceManager, ifrt_load_variable (used_by_host=true) should
// fall back to the ResourceManager and return the stored tensor.
TEST_F(KernelTest, IfrtLoadVariableOpCanGetTensorFromResourceManager) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(
      false, true);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar<int32_t>()() = 1234;
  // Register the variable with the host resource manager only (not IFRT).
  tsl::core::RefCountPtr<Var> variable(new Var(DT_INT32));
  *variable->tensor() = input_tensor;
  variable->is_initialized = true;
  ASSERT_OK(
      fallback_state_->device_manager().HostCPU()->resource_manager()->Create(
          std::string(kContainer), std::string(kSharedName), &(*variable)));
  std::vector<mlrt::Value> args;
  std::vector<uint8_t> last_uses;
  std::vector<mlrt::Value> results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  // Output 0 is the runtime-name key; output 1 resolves to the host tensor.
  ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  auto returned_future = results[1].Get<mlrt::Future>();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
              TensorEq(input_tensor));
}
// Happy path: the variable is registered in the IFRT restore registry and is
// NOT used by the host, so output 1 resolves to an empty placeholder tensor.
TEST_F(KernelTest, IfrtLoadVariableOp) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp();
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar<int32_t>()() = 1234;
  // Register the restored tensor with the IFRT restore-tensor registry,
  // fulfilling the future immediately.
  auto input_tensor_promise =
      xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
  auto input_tensor_future =
      xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
  ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
      restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
                                              .shape = input_tensor.shape()},
                          .tensor_future = input_tensor_future};
  input_tensor_promise.Set(input_tensor);
  TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
      kVariableRuntimeName, restore_tensor_info));
  std::vector<mlrt::Value> args;
  std::vector<uint8_t> last_uses;
  std::vector<mlrt::Value> results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  // used_by_host defaults to false, so the future carries an empty tensor.
  auto returned_future = results[1].Get<mlrt::Future>();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
              TensorEq(tensorflow::Tensor()));
}
// Loading the same variable twice in one function (redundant load op) must
// still succeed and produce the same outputs as a single load.
TEST_F(KernelTest, DuplicateIfrtLoadVariableOpShallSucceed) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(
      true);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar<int32_t>()() = 1234;
  auto input_tensor_promise =
      xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
  auto input_tensor_future =
      xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
  ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
      restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
                                              .shape = input_tensor.shape()},
                          .tensor_future = input_tensor_future};
  input_tensor_promise.Set(input_tensor);
  TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
      kVariableRuntimeName, restore_tensor_info));
  std::vector<mlrt::Value> args;
  std::vector<uint8_t> last_uses;
  std::vector<mlrt::Value> results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  auto returned_future = results[1].Get<mlrt::Future>();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
              TensorEq(tensorflow::Tensor()));
}
// End-to-end restore of one variable from a checkpoint on disk: the restored
// tensor must appear in the IFRT restore-tensor registry under the runtime
// name suffixed with the variable index.
TEST_F(KernelTest, IfrtRestoreVariableOp) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  auto buffer = CreateExecutableForIfrtRestoreVariableOp();
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  // Registry must start empty for the plain runtime name.
  xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector<mlrt::Value> args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>({tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector<uint8_t> last_uses = {true, true, true};
  std::vector<mlrt::Value> results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  // The restored tensor is registered under "<runtime_name>0".
  xla::ifrt::Future<tensorflow::Tensor> restored_future =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 0));
  absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
  TF_ASSERT_OK(restored_tensor.status());
  EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
}
// Restores four variables in one op; each restored tensor must be registered
// under "<runtime_name><index>" with the expected checkpoint contents.
TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  static constexpr int kNumVariables = 4;
  auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  // Registry must start empty for the plain runtime name.
  xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector<mlrt::Value> args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
      {tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector<uint8_t> last_uses = {true, true, true};
  std::vector<mlrt::Value> results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  xla::ifrt::Future<tensorflow::Tensor> restored_future =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 0));
  absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
  TF_ASSERT_OK(restored_tensor.status());
  EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future1 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 1));
  absl::StatusOr<tensorflow::Tensor> restored_tensor1 =
      restored_future1.Await();
  TF_ASSERT_OK(restored_tensor1.status());
  EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor<int16_t>({4, 5, 6}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future2 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 2));
  absl::StatusOr<tensorflow::Tensor> restored_tensor2 =
      restored_future2.Await();
  TF_ASSERT_OK(restored_tensor2.status());
  EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor<int16_t>({7, 8, 9}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future3 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 3));
  absl::StatusOr<tensorflow::Tensor> restored_tensor3 =
      restored_future3.Await();
  TF_ASSERT_OK(restored_tensor3.status());
  EXPECT_THAT(*restored_tensor3,
              TensorEq(AsTensor<int16_t>({10, 11, 12}, {3})));
}
}
}
} | absl::Status MlrtIfrtRestoreVariableKernel::RunShard(
RestoreVariableShard shard) {
std::optional<IfrtModelContext*> ifrt_model_context =
context().resource_context().GetResource<IfrtModelContext>(
"IfrtModelContext");
if (!ifrt_model_context.has_value()) {
return absl::FailedPreconditionError(
"RestoreVariableOp: failed to fetch IfrtModelContext");
}
const int num_outputs = shard.var_handles.size();
DCHECK_EQ(num_outputs, shard.tensor_names.NumElements());
auto& fallback_request_state = context().fallback_request_state();
auto runner =
tfrt_stub::OpKernelRunner::Create(
"RestoreV2", "RestoreV2",
context().params().device->name(),
3,
[&](tensorflow::AttrValueMap* attr_value_map) {
attr_value_map->insert({"dtypes", shard.dtypes_attr_value});
return absl::OkStatus();
},
fallback_request_state.device_manager(),
fallback_request_state.process_function_library_runtime())
.value();
std::vector<tensorflow::TensorValue> input_tf_tensor_values;
static constexpr int kNumInputArgs = 3;
input_tf_tensor_values.resize(kNumInputArgs);
input_tf_tensor_values[0].tensor = &shard.prefix;
input_tf_tensor_values[1].tensor = &shard.tensor_names;
input_tf_tensor_values[2].tensor = &shard.shape_and_slices;
auto& params = context().params();
SetUpParams(runner, input_tf_tensor_values, params);
params.device = context().fallback_request_state().device_manager().HostCPU();
struct AsyncState {
explicit AsyncState(
const std::vector<tensorflow::TensorValue>& input_tf_tensor_values,
const OpKernelContext::Params& params, int num_outputs,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime)
: run_state(input_tf_tensor_values, params),
context(&run_state.params, num_outputs),
device_manager(device_manager),
process_function_library_runtime(process_function_library_runtime) {}
tfrt_stub::OpKernelRunState run_state;
OpKernelContext context;
const tensorflow::DeviceMgr& device_manager;
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime;
std::vector<xla::ifrt::Promise<tensorflow::Tensor>> results;
};
auto async_state = std::make_unique<AsyncState>(
input_tf_tensor_values, params, num_outputs,
fallback_request_state.device_manager(),
fallback_request_state.process_function_library_runtime());
ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =
(*ifrt_model_context)->GetRestoreTensorRegistry();
for (int i = 0; i < num_outputs; ++i) {
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
const ResourceHandle& var_handle =
shard.var_handles[i].tensor().scalar<tensorflow::ResourceHandle>()();
TF_ASSIGN_OR_RETURN(ifrt_serving::DtypeAndShape dtype_and_shape,
ifrt_serving::GetDtypeAndShape(var_handle));
std::string runtime_name =
ifrt_serving::GetRuntimeNameFromVarHandle(var_handle);
ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
restored_tensor_info = {false, std::move(dtype_and_shape),
std::move(future)};
if (auto status = ifrt_restore_tensor_registry.TryRegister(
runtime_name, restored_tensor_info);
!status.ok()) {
for (auto& result : async_state->results) {
std::move(result).Set(status);
};
return status;
}
async_state->results.push_back(std::move(promise));
}
DCHECK((*ifrt_model_context)->checkpoint_loader_queue() != nullptr);
(*ifrt_model_context)
->checkpoint_loader_queue()
->AddTask([runner = std::move(runner),
async_state = std::move(async_state),
shard = std::move(shard)]() {
auto* op_kernel_context_ptr = &async_state->context;
runner.Run(op_kernel_context_ptr);
auto& op_kernel_context = async_state->context;
if (!op_kernel_context.status().ok()) {
for (auto& result : async_state->results) {
std::move(result).Set(op_kernel_context.status());
}
return;
}
DCHECK_EQ(shard.var_handles.size(), op_kernel_context.num_outputs());
DCHECK_EQ(shard.truncate_in_cast.size(),
op_kernel_context.num_outputs());
for (int i = 0; i < op_kernel_context.num_outputs(); ++i) {
DCHECK(op_kernel_context.mutable_output(i));
if (op_kernel_context.mutable_output(i)->dtype() !=
shard.restored_dtypes[i]) {
std::move(async_state->results[i])
.Set(absl::InvalidArgumentError(absl::StrCat(
"The restored tensor has a different dtype than the "
"variable handle: ",
op_kernel_context.mutable_output(i)->dtype(), " vs. ",
shard.restored_dtypes[i])));
return;
}
const ResourceHandle& var_handle =
shard.var_handles[i]
.tensor()
.scalar<tensorflow::ResourceHandle>()();
if (shard.restored_dtypes[i] ==
var_handle.dtypes_and_shapes()[0].dtype) {
std::move(async_state->results[i])
.Set(*std::move(op_kernel_context.mutable_output(i)));
} else {
absl::StatusOr<tensorflow::Tensor> cast_output = Cast(
*op_kernel_context.mutable_output(i), shard.restored_dtypes[i],
var_handle.dtypes_and_shapes()[0].dtype,
shard.truncate_in_cast[i], async_state->device_manager,
async_state->process_function_library_runtime,
async_state->run_state.params);
if (!cast_output.ok()) {
std::move(async_state->results[i]).Set(cast_output.status());
} else {
std::move(async_state->results[i]).Set(*std::move(cast_output));
}
}
}
});
return absl::OkStatus();
} | TEST_F(KernelTest, IfrtRestoreVariableOp) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
auto buffer = CreateExecutableForIfrtRestoreVariableOp();
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>({tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
[¬ification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
xla::ifrt::Future<tensorflow::Tensor> restored_future =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 0));
absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
TF_ASSERT_OK(restored_tensor.status());
EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
}
// Same as IfrtRestoreVariableOp but restores four variables (w..w3) in one
// executable run and checks each registry entry independently.
// NOTE(review): the exit-handler lambda capture had been mangled by a bad
// encoding pass (it read "[¬ification]") and is restored to capture
// `notification` by reference.
TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  static constexpr int kNumVariables = 4;
  auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector<mlrt::Value> args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
                              tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
      {tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector<uint8_t> last_uses = {true, true, true};
  std::vector<mlrt::Value> results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  xla::ifrt::Future<tensorflow::Tensor> restored_future =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 0));
  absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
  TF_ASSERT_OK(restored_tensor.status());
  EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future1 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 1));
  absl::StatusOr<tensorflow::Tensor> restored_tensor1 =
      restored_future1.Await();
  TF_ASSERT_OK(restored_tensor1.status());
  EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor<int16_t>({4, 5, 6}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future2 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 2));
  absl::StatusOr<tensorflow::Tensor> restored_tensor2 =
      restored_future2.Await();
  TF_ASSERT_OK(restored_tensor2.status());
  EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor<int16_t>({7, 8, 9}, {3})));
  xla::ifrt::Future<tensorflow::Tensor> restored_future3 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 3));
  absl::StatusOr<tensorflow::Tensor> restored_tensor3 =
      restored_future3.Await();
  TF_ASSERT_OK(restored_tensor3.status());
  EXPECT_THAT(*restored_tensor3,
              TensorEq(AsTensor<int16_t>({10, 11, 12}, {3})));
}
#include "quiche/quic/core/quic_flow_controller.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
#define ENDPOINT \
(perspective_ == Perspective::IS_SERVER ? "Server: " : "Client: ")
// Human-readable identifier used in log messages: "connection" for the
// connection-level controller, otherwise "stream <id>".
std::string QuicFlowController::LogLabel() {
  return is_connection_flow_controller_ ? std::string("connection")
                                        : absl::StrCat("stream ", id_);
}
// Constructs a flow controller for one stream, or for the whole connection
// when |is_connection_flow_controller| is true (in that case |id| must be the
// invalid stream id — enforced by the DCHECK below). The receive window size
// starts out equal to the initial receive window offset.
QuicFlowController::QuicFlowController(
    QuicSession* session, QuicStreamId id, bool is_connection_flow_controller,
    QuicStreamOffset send_window_offset, QuicStreamOffset receive_window_offset,
    QuicByteCount receive_window_size_limit,
    bool should_auto_tune_receive_window,
    QuicFlowControllerInterface* session_flow_controller)
    : session_(session),
      connection_(session->connection()),
      id_(id),
      is_connection_flow_controller_(is_connection_flow_controller),
      perspective_(session->perspective()),
      bytes_sent_(0),
      send_window_offset_(send_window_offset),
      bytes_consumed_(0),
      highest_received_byte_offset_(0),
      receive_window_offset_(receive_window_offset),
      receive_window_size_(receive_window_offset),  // size == initial offset.
      receive_window_size_limit_(receive_window_size_limit),
      auto_tune_receive_window_(should_auto_tune_receive_window),
      session_flow_controller_(session_flow_controller),
      last_blocked_send_window_offset_(0),
      prev_window_update_time_(QuicTime::Zero()) {
  QUICHE_DCHECK_LE(receive_window_size_, receive_window_size_limit_);
  QUICHE_DCHECK_EQ(
      is_connection_flow_controller_,
      QuicUtils::GetInvalidStreamId(session_->transport_version()) == id_);
  QUIC_DVLOG(1) << ENDPOINT << "Created flow controller for " << LogLabel()
                << ", setting initial receive window offset to: "
                << receive_window_offset_
                << ", max receive window to: " << receive_window_size_
                << ", max receive window limit to: "
                << receive_window_size_limit_
                << ", setting send window offset to: " << send_window_offset_;
}
// Records bytes the application has consumed from the receive buffer, and
// possibly advertises more receive window to the peer.
void QuicFlowController::AddBytesConsumed(QuicByteCount bytes_consumed) {
  bytes_consumed_ += bytes_consumed;
  QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " consumed " << bytes_consumed_
                << " bytes.";
  MaybeSendWindowUpdate();
}
// Advances the high-water mark of received data. Returns true only when
// |new_offset| is strictly past the current highest received byte offset.
bool QuicFlowController::UpdateHighestReceivedOffset(
    QuicStreamOffset new_offset) {
  const bool advanced = new_offset > highest_received_byte_offset_;
  if (advanced) {
    QUIC_DVLOG(1) << ENDPOINT << LogLabel()
                  << " highest byte offset increased from "
                  << highest_received_byte_offset_ << " to " << new_offset;
    highest_received_byte_offset_ = new_offset;
  }
  return advanced;
}
// Accounts for |bytes_sent| bytes of new stream data. Overshooting the peer's
// advertised send window is a local bug: we clamp the counter and close the
// connection.
void QuicFlowController::AddBytesSent(QuicByteCount bytes_sent) {
  if (bytes_sent_ + bytes_sent > send_window_offset_) {
    QUIC_BUG(quic_bug_10836_1)
        << ENDPOINT << LogLabel() << " Trying to send an extra " << bytes_sent
        << " bytes, when bytes_sent = " << bytes_sent_
        << ", and send_window_offset_ = " << send_window_offset_;
    // Compute the overshoot before clamping bytes_sent_ and with the operands
    // in the right order; the original expression subtracted the larger value
    // from the smaller one after the clamp, underflowing the unsigned type and
    // logging a bogus huge number (it also lacked a space before "bytes").
    const QuicByteCount bytes_over =
        bytes_sent_ + bytes_sent - send_window_offset_;
    bytes_sent_ = send_window_offset_;
    connection_->CloseConnection(
        QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA,
        absl::StrCat(bytes_over, " bytes over send window offset"),
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  bytes_sent_ += bytes_sent;
  QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " sent " << bytes_sent_
                << " bytes.";
}
// True when the peer has sent data beyond the receive window we advertised.
bool QuicFlowController::FlowControlViolation() {
  if (highest_received_byte_offset_ <= receive_window_offset_) {
    return false;
  }
  QUIC_DLOG(INFO) << ENDPOINT << "Flow control violation on " << LogLabel()
                  << ", receive window offset: " << receive_window_offset_
                  << ", highest received byte offset: "
                  << highest_received_byte_offset_;
  return true;
}
// Auto-tuning: if window updates are being generated faster than once every
// two smoothed RTTs, the receive window is the bottleneck, so double it (up
// to the configured limit) and, for a stream controller, grow the session
// window proportionally.
void QuicFlowController::MaybeIncreaseMaxWindowSize() {
  // Always record the update time, even when auto-tuning is off, so pacing
  // can be measured from the very first update.
  QuicTime now = connection_->clock()->ApproximateNow();
  QuicTime prev = prev_window_update_time_;
  prev_window_update_time_ = now;
  if (!prev.IsInitialized()) {
    // First update: no previous timestamp to compare against.
    QUIC_DVLOG(1) << ENDPOINT << "first window update for " << LogLabel();
    return;
  }
  if (!auto_tune_receive_window_) {
    return;
  }
  QuicTime::Delta rtt =
      connection_->sent_packet_manager().GetRttStats()->smoothed_rtt();
  if (rtt.IsZero()) {
    // No RTT sample yet; cannot judge pacing.
    QUIC_DVLOG(1) << ENDPOINT << "rtt zero for " << LogLabel();
    return;
  }
  QuicTime::Delta since_last = now - prev;
  QuicTime::Delta two_rtt = 2 * rtt;
  if (since_last >= two_rtt) {
    // Updates are infrequent enough; the window is not the bottleneck.
    return;
  }
  QuicByteCount old_window = receive_window_size_;
  IncreaseWindowSize();
  if (receive_window_size_ > old_window) {
    QUIC_DVLOG(1) << ENDPOINT << "New max window increase for " << LogLabel()
                  << " after " << since_last.ToMicroseconds()
                  << " us, and RTT is " << rtt.ToMicroseconds()
                  << "us. max wndw: " << receive_window_size_;
    if (session_flow_controller_ != nullptr) {
      // Keep the session-level window ahead of the per-stream window.
      session_flow_controller_->EnsureWindowAtLeast(
          kSessionFlowControlMultiplier * receive_window_size_);
    }
  } else {
    // Already at receive_window_size_limit_.
    QUIC_LOG_FIRST_N(INFO, 1)
        << ENDPOINT << "Max window at limit for " << LogLabel() << " after "
        << since_last.ToMicroseconds() << " us, and RTT is "
        << rtt.ToMicroseconds() << "us. Limit size: " << receive_window_size_;
  }
}
// Doubles the receive window, capped at the configured limit.
void QuicFlowController::IncreaseWindowSize() {
  receive_window_size_ =
      std::min(receive_window_size_ * 2, receive_window_size_limit_);
}
// A WINDOW_UPDATE is sent once the available receive window drops below half
// the current window size.
QuicByteCount QuicFlowController::WindowUpdateThreshold() {
  return receive_window_size_ / 2;
}
// Sends a WINDOW_UPDATE when the window remaining to the peer has dropped
// below the threshold; also feeds the auto-tuning logic.
void QuicFlowController::MaybeSendWindowUpdate() {
  if (!session_->connection()->connected()) {
    return;
  }
  QUICHE_DCHECK_LE(bytes_consumed_, receive_window_offset_);
  QuicStreamOffset available_window = receive_window_offset_ - bytes_consumed_;
  QuicByteCount threshold = WindowUpdateThreshold();
  if (!prev_window_update_time_.IsInitialized()) {
    // Seed the pacing clock so the first real update has a reference point.
    prev_window_update_time_ = connection_->clock()->ApproximateNow();
  }
  if (available_window >= threshold) {
    QUIC_DVLOG(1) << ENDPOINT << "Not sending WindowUpdate for " << LogLabel()
                  << ", available window: " << available_window
                  << " >= threshold: " << threshold;
    return;
  }
  // Possibly grow the window (auto-tune) before advertising it.
  MaybeIncreaseMaxWindowSize();
  UpdateReceiveWindowOffsetAndSendWindowUpdate(available_window);
}
// Slides the receive window forward so the peer again sees a full
// receive_window_size_ of credit, then emits the WINDOW_UPDATE frame.
void QuicFlowController::UpdateReceiveWindowOffsetAndSendWindowUpdate(
    QuicStreamOffset available_window) {
  receive_window_offset_ += (receive_window_size_ - available_window);
  QUIC_DVLOG(1) << ENDPOINT << "Sending WindowUpdate frame for " << LogLabel()
                << ", consumed bytes: " << bytes_consumed_
                << ", available window: " << available_window
                << ", and threshold: " << WindowUpdateThreshold()
                << ", and receive window size: " << receive_window_size_
                << ". New receive window offset is: " << receive_window_offset_;
  SendWindowUpdate();
}
// Notifies the session that we are flow-control blocked, at most once per
// send-window offset (last_blocked_send_window_offset_ deduplicates).
void QuicFlowController::MaybeSendBlocked() {
  if (SendWindowSize() != 0 ||
      last_blocked_send_window_offset_ >= send_window_offset_) {
    return;
  }
  QUIC_DLOG(INFO) << ENDPOINT << LogLabel() << " is flow control blocked. "
                  << "Send window: " << SendWindowSize()
                  << ", bytes sent: " << bytes_sent_
                  << ", send limit: " << send_window_offset_;
  last_blocked_send_window_offset_ = send_window_offset_;
  session_->SendBlocked(id_, last_blocked_send_window_offset_);
}
// Applies a new send-window offset from the peer. Stale or duplicate offsets
// are ignored. Returns true when this update unblocked a previously blocked
// sender.
bool QuicFlowController::UpdateSendWindowOffset(
    QuicStreamOffset new_send_window_offset) {
  // Only a strictly larger offset carries new credit.
  if (new_send_window_offset <= send_window_offset_) {
    return false;
  }
  QUIC_DVLOG(1) << ENDPOINT << "UpdateSendWindowOffset for " << LogLabel()
                << " with new offset " << new_send_window_offset
                << " current offset: " << send_window_offset_
                << " bytes_sent: " << bytes_sent_;
  const bool unblocked = IsBlocked();
  send_window_offset_ = new_send_window_offset;
  return unblocked;
}
// Grows the receive window limit when a stream controller asks the session
// controller to stay ahead of it; no-op when the limit already suffices.
void QuicFlowController::EnsureWindowAtLeast(QuicByteCount window_size) {
  if (receive_window_size_limit_ < window_size) {
    const QuicStreamOffset available_window =
        receive_window_offset_ - bytes_consumed_;
    IncreaseWindowSize();
    UpdateReceiveWindowOffsetAndSendWindowUpdate(available_window);
  }
}
// The sender is blocked exactly when no send window remains.
bool QuicFlowController::IsBlocked() const { return SendWindowSize() == 0; }
// Bytes that may still be sent before hitting the peer's flow-control limit;
// clamps to zero if bytes_sent_ has (erroneously) overshot the offset.
uint64_t QuicFlowController::SendWindowSize() const {
  return bytes_sent_ > send_window_offset_
             ? 0
             : send_window_offset_ - bytes_sent_;
}
// Replaces the receive window size and offset in one step. Only legal before
// the window has moved — i.e. while size still equals offset — which the
// QUIC_BUG below enforces.
void QuicFlowController::UpdateReceiveWindowSize(QuicStreamOffset size) {
  QUICHE_DCHECK_LE(size, receive_window_size_limit_);
  QUIC_DVLOG(1) << ENDPOINT << "UpdateReceiveWindowSize for " << LogLabel()
                << ": " << size;
  if (receive_window_size_ != receive_window_offset_) {
    QUIC_BUG(quic_bug_10836_2)
        << "receive_window_size_:" << receive_window_size_
        << " != receive_window_offset:" << receive_window_offset_;
    return;
  }
  receive_window_size_ = size;
  receive_window_offset_ = size;
}
void QuicFlowController::SendWindowUpdate() {
QuicStreamId id = id_;
if (is_connection_flow_controller_) {
id = QuicUtils::GetInvalidStreamId(connection_->transport_version());
}
session_->SendWindowUpdate(id, receive_window_offset_);
}
} | #include "quiche/quic/core/quic_flow_controller.h"
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_flow_controller_peer.h"
#include "quiche/quic/test_tools/quic_sent_packet_manager_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::Invoke;
using testing::StrictMock;
namespace quic {
namespace test {
const int64_t kRtt = 100;
// QuicFlowControllerInterface stub that lets tests observe the
// EnsureWindowAtLeast() calls a stream-level controller makes on its
// session-level controller.
class MockFlowController : public QuicFlowControllerInterface {
 public:
  MockFlowController() {}
  MockFlowController(const MockFlowController&) = delete;
  MockFlowController& operator=(const MockFlowController&) = delete;
  ~MockFlowController() override {}
  MOCK_METHOD(void, EnsureWindowAtLeast, (QuicByteCount), (override));
};
// Test harness: wires a mock connection/session to a real stream-level
// QuicFlowController (stream id 1234) whose initial send and receive windows
// are equal. Tests set should_auto_tune_receive_window_ before calling
// Initialize().
class QuicFlowControllerTest : public QuicTest {
 public:
  void Initialize() {
    connection_ = new MockQuicConnection(&helper_, &alarm_factory_,
                                         Perspective::IS_CLIENT);
    connection_->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<NullEncrypter>(connection_->perspective()));
    session_ = std::make_unique<StrictMock<MockQuicSession>>(connection_);
    flow_controller_ = std::make_unique<QuicFlowController>(
        session_.get(), stream_id_, false,
        send_window_, receive_window_, kStreamReceiveWindowLimit,
        should_auto_tune_receive_window_, &session_flow_controller_);
  }
 protected:
  QuicStreamId stream_id_ = 1234;
  QuicByteCount send_window_ = kInitialSessionFlowControlWindowForTest;
  QuicByteCount receive_window_ = kInitialSessionFlowControlWindowForTest;
  std::unique_ptr<QuicFlowController> flow_controller_;
  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  MockQuicConnection* connection_;  // Owned by session machinery, not us.
  std::unique_ptr<StrictMock<MockQuicSession>> session_;
  MockFlowController session_flow_controller_;
  bool should_auto_tune_receive_window_ = false;
};
// Send-side accounting: the window shrinks as bytes are sent, a BLOCKED frame
// is emitted at zero window, a larger offset unblocks, a stale offset is
// ignored, and overshooting the window is a QUIC_BUG that closes the
// connection.
TEST_F(QuicFlowControllerTest, SendingBytes) {
  Initialize();
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
  flow_controller_->AddBytesSent(send_window_ / 2);
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize());
  flow_controller_->AddBytesSent(send_window_ / 2);
  EXPECT_TRUE(flow_controller_->IsBlocked());
  EXPECT_EQ(0u, flow_controller_->SendWindowSize());
  EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
  flow_controller_->MaybeSendBlocked();
  EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_));
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
  EXPECT_FALSE(flow_controller_->UpdateSendWindowOffset(send_window_ / 10));
  EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(
            *connection_,
            CloseConnection(QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA, _, _));
        flow_controller_->AddBytesSent(send_window_ * 10);
        EXPECT_TRUE(flow_controller_->IsBlocked());
        EXPECT_EQ(0u, flow_controller_->SendWindowSize());
      },
      absl::StrCat("Trying to send an extra ", send_window_ * 10, " bytes"));
}
// Receive side: consuming past the window-update threshold triggers one
// WINDOW_UPDATE that restores the full advertised window.
TEST_F(QuicFlowControllerTest, ReceivingBytes) {
  Initialize();
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  EXPECT_TRUE(
      flow_controller_->UpdateHighestReceivedOffset(1 + receive_window_ / 2));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ((receive_window_ / 2) - 1,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1);
  flow_controller_->AddBytesConsumed(1 + receive_window_ / 2);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
}
// Move-constructing a controller preserves both send-side and receive-side
// state.
TEST_F(QuicFlowControllerTest, Move) {
  Initialize();
  flow_controller_->AddBytesSent(send_window_ / 2);
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize());
  EXPECT_TRUE(
      flow_controller_->UpdateHighestReceivedOffset(1 + receive_window_ / 2));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ((receive_window_ / 2) - 1,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  QuicFlowController flow_controller2(std::move(*flow_controller_));
  EXPECT_EQ(send_window_ / 2, flow_controller2.SendWindowSize());
  EXPECT_FALSE(flow_controller2.FlowControlViolation());
  EXPECT_EQ((receive_window_ / 2) - 1,
            QuicFlowControllerPeer::ReceiveWindowSize(&flow_controller2));
}
// A BLOCKED frame is sent at most once per send-window offset, and again only
// after the offset advances and the sender re-blocks.
TEST_F(QuicFlowControllerTest, OnlySendBlockedFrameOncePerOffset) {
  Initialize();
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
  flow_controller_->AddBytesSent(send_window_);
  EXPECT_TRUE(flow_controller_->IsBlocked());
  EXPECT_EQ(0u, flow_controller_->SendWindowSize());
  EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
  flow_controller_->MaybeSendBlocked();
  // Repeated calls at the same offset must not emit more BLOCKED frames.
  EXPECT_CALL(*session_, SendBlocked(_, _)).Times(0);
  flow_controller_->MaybeSendBlocked();
  flow_controller_->MaybeSendBlocked();
  flow_controller_->MaybeSendBlocked();
  flow_controller_->MaybeSendBlocked();
  flow_controller_->MaybeSendBlocked();
  EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_));
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
  flow_controller_->AddBytesSent(send_window_);
  EXPECT_TRUE(flow_controller_->IsBlocked());
  EXPECT_EQ(0u, flow_controller_->SendWindowSize());
  EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
  flow_controller_->MaybeSendBlocked();
}
// With auto-tune enabled, window updates spaced less than 2 RTTs apart double
// the receive window and propagate a proportional increase to the session
// controller.
TEST_F(QuicFlowControllerTest, ReceivingBytesFastIncreasesFlowWindow) {
  should_auto_tune_receive_window_ = true;
  Initialize();
  EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1);
  EXPECT_TRUE(flow_controller_->auto_tune_receive_window());
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicSentPacketManager* manager =
      QuicConnectionPeer::GetSentPacketManager(connection_);
  RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  QuicByteCount threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  QuicStreamOffset receive_offset = threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  EXPECT_CALL(
      session_flow_controller_,
      EnsureWindowAtLeast(kInitialSessionFlowControlWindowForTest * 2 * 1.5));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(2 * kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  // Second update still arrives inside the 2-RTT window, so the threshold
  // (half the window) must have grown.
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt - 1));
  receive_offset += threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  QuicByteCount new_threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  EXPECT_GT(new_threshold, threshold);
}
// Without auto-tune, rapid (< 2 RTT) window updates leave the receive window
// and threshold unchanged.
TEST_F(QuicFlowControllerTest, ReceivingBytesFastNoAutoTune) {
  Initialize();
  EXPECT_CALL(*session_, WriteControlFrame(_, _))
      .Times(2)
      .WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType));
  EXPECT_FALSE(flow_controller_->auto_tune_receive_window());
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicSentPacketManager* manager =
      QuicConnectionPeer::GetSentPacketManager(connection_);
  RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  QuicByteCount threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  QuicStreamOffset receive_offset = threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt - 1));
  receive_offset += threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  QuicByteCount new_threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  EXPECT_EQ(new_threshold, threshold);
}
// With auto-tune on, a fast first update doubles the window, but once updates
// arrive slower than 2 RTTs apart the window stops growing (threshold stays
// at 2x the original).
TEST_F(QuicFlowControllerTest, ReceivingBytesNormalStableFlowWindow) {
  should_auto_tune_receive_window_ = true;
  Initialize();
  EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1);
  EXPECT_TRUE(flow_controller_->auto_tune_receive_window());
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicSentPacketManager* manager =
      QuicConnectionPeer::GetSentPacketManager(connection_);
  RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  QuicByteCount threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  QuicStreamOffset receive_offset = threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  EXPECT_CALL(
      session_flow_controller_,
      EnsureWindowAtLeast(kInitialSessionFlowControlWindowForTest * 2 * 1.5));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(2 * kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  // Second update arrives after more than 2 RTTs: no further growth.
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt + 1));
  receive_offset += threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  QuicByteCount new_threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  EXPECT_EQ(new_threshold, 2 * threshold);
}
// Without auto-tune, slowly paced (> 2 RTT) updates also leave the window and
// threshold unchanged.
TEST_F(QuicFlowControllerTest, ReceivingBytesNormalNoAutoTune) {
  Initialize();
  EXPECT_CALL(*session_, WriteControlFrame(_, _))
      .Times(2)
      .WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType));
  EXPECT_FALSE(flow_controller_->auto_tune_receive_window());
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicSentPacketManager* manager =
      QuicConnectionPeer::GetSentPacketManager(connection_);
  RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_FALSE(flow_controller_->IsBlocked());
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  QuicByteCount threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  QuicStreamOffset receive_offset = threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  EXPECT_EQ(kInitialSessionFlowControlWindowForTest,
            QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get()));
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt + 1));
  receive_offset += threshold + 1;
  EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset));
  flow_controller_->AddBytesConsumed(threshold + 1);
  EXPECT_FALSE(flow_controller_->FlowControlViolation());
  QuicByteCount new_threshold =
      QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get());
  EXPECT_EQ(new_threshold, threshold);
}
}
} | void QuicFlowController::AddBytesSent(QuicByteCount bytes_sent) {
if (bytes_sent_ + bytes_sent > send_window_offset_) {
QUIC_BUG(quic_bug_10836_1)
<< ENDPOINT << LogLabel() << " Trying to send an extra " << bytes_sent
<< " bytes, when bytes_sent = " << bytes_sent_
<< ", and send_window_offset_ = " << send_window_offset_;
bytes_sent_ = send_window_offset_;
connection_->CloseConnection(
QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA,
absl::StrCat(send_window_offset_ - (bytes_sent_ + bytes_sent),
"bytes over send window offset"),
ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
return;
}
bytes_sent_ += bytes_sent;
QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " sent " << bytes_sent_
<< " bytes.";
} | TEST_F(QuicFlowControllerTest, SendingBytes) {
Initialize();
EXPECT_FALSE(flow_controller_->IsBlocked());
EXPECT_FALSE(flow_controller_->FlowControlViolation());
EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
flow_controller_->AddBytesSent(send_window_ / 2);
EXPECT_FALSE(flow_controller_->IsBlocked());
EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize());
flow_controller_->AddBytesSent(send_window_ / 2);
EXPECT_TRUE(flow_controller_->IsBlocked());
EXPECT_EQ(0u, flow_controller_->SendWindowSize());
EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
flow_controller_->MaybeSendBlocked();
EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_));
EXPECT_FALSE(flow_controller_->IsBlocked());
EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
EXPECT_FALSE(flow_controller_->UpdateSendWindowOffset(send_window_ / 10));
EXPECT_EQ(send_window_, flow_controller_->SendWindowSize());
EXPECT_QUIC_BUG(
{
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA, _, _));
flow_controller_->AddBytesSent(send_window_ * 10);
EXPECT_TRUE(flow_controller_->IsBlocked());
EXPECT_EQ(0u, flow_controller_->SendWindowSize());
},
absl::StrCat("Trying to send an extra ", send_window_ * 10, " bytes"));
} |
#include "quiche/common/quiche_random.h"
#include <cstdint>
#include <cstring>
#include "openssl/rand.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
// Produces one 64-bit word of seed material for the thread-local xoshiro
// state, drawn from the secure RAND_bytes source.
inline uint64_t Xoshiro256InitializeRngStateMember() {
  uint64_t result;
  RAND_bytes(reinterpret_cast<uint8_t*>(&result), sizeof(result));
  return result;
}
// Rotates |x| left by |k| bits (k in [0, 63]). Masking the right-shift count
// keeps the expression well-defined for k == 0 — the original shifted by 64,
// which is undefined behavior. Callers here pass only 23 and 45, for which
// the result is unchanged.
inline uint64_t Xoshiro256PlusPlusRotLeft(uint64_t x, int k) {
  return (x << k) | (x >> ((64 - k) & 63));
}
// Advances a per-thread xoshiro256++ generator and returns its next output.
// The state is lazily seeded once per thread from RAND_bytes. Fast, but NOT
// cryptographically secure — used only for the "insecure" API below.
uint64_t Xoshiro256PlusPlus() {
  static thread_local uint64_t rng_state[4] = {
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember(),
      Xoshiro256InitializeRngStateMember()};
  // Output function ("++" variant): rotl(s0 + s3, 23) + s0.
  const uint64_t result =
      Xoshiro256PlusPlusRotLeft(rng_state[0] + rng_state[3], 23) + rng_state[0];
  // State transition of the xoshiro256 linear engine.
  const uint64_t t = rng_state[1] << 17;
  rng_state[2] ^= rng_state[0];
  rng_state[3] ^= rng_state[1];
  rng_state[1] ^= rng_state[2];
  rng_state[0] ^= rng_state[3];
  rng_state[2] ^= t;
  rng_state[3] = Xoshiro256PlusPlusRotLeft(rng_state[3], 45);
  return result;
}
// QuicheRandom implementation backed by BoringSSL's RAND_bytes for the
// secure methods and by the xoshiro256++ generator above for the fast
// Insecure* methods.
class DefaultQuicheRandom : public QuicheRandom {
 public:
  DefaultQuicheRandom() {}
  // Non-copyable: used as a process-wide singleton (see GetInstance()).
  DefaultQuicheRandom(const DefaultQuicheRandom&) = delete;
  DefaultQuicheRandom& operator=(const DefaultQuicheRandom&) = delete;
  ~DefaultQuicheRandom() override {}
  // QuicheRandom implementation.
  void RandBytes(void* data, size_t len) override;
  uint64_t RandUint64() override;
  void InsecureRandBytes(void* data, size_t len) override;
  uint64_t InsecureRandUint64() override;
};
// Fills `data` with `len` cryptographically secure random bytes.
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
  uint8_t* out = reinterpret_cast<uint8_t*>(data);
  RAND_bytes(out, len);
}
// Returns a cryptographically secure random 64-bit value by drawing eight
// bytes through RandBytes().
uint64_t DefaultQuicheRandom::RandUint64() {
  uint64_t result;
  RandBytes(&result, sizeof(result));
  return result;
}
// Fills `data` with `len` bytes from the fast, NON-cryptographic
// xoshiro256++ generator: whole 64-bit words first, then a partial word
// for any remaining tail bytes.
void DefaultQuicheRandom::InsecureRandBytes(void* data, size_t len) {
  char* out = reinterpret_cast<char*>(data);
  for (; len >= sizeof(uint64_t); len -= sizeof(uint64_t)) {
    const uint64_t word = Xoshiro256PlusPlus();
    memcpy(out, &word, sizeof(uint64_t));
    out += sizeof(uint64_t);
  }
  if (len > 0) {
    QUICHE_DCHECK_LT(len, sizeof(uint64_t));
    const uint64_t word = Xoshiro256PlusPlus();
    memcpy(out, &word, len);
  }
}
// Returns one 64-bit output of the fast, NON-cryptographic generator.
uint64_t DefaultQuicheRandom::InsecureRandUint64() {
  const uint64_t value = Xoshiro256PlusPlus();
  return value;
}
}
// Returns the process-wide RNG singleton. The instance is heap-allocated
// and intentionally never deleted -- presumably to avoid static-destruction
// -order issues; creation is thread-safe via C++11 static-local init.
QuicheRandom* QuicheRandom::GetInstance() {
  static DefaultQuicheRandom* random = new DefaultQuicheRandom();
  return random;
}
} | #include "quiche/common/quiche_random.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
// RandBytes must overwrite a pre-filled (all-0xaf) buffer with fresh data.
TEST(QuicheRandom, RandBytes) {
  unsigned char buf1[16];
  unsigned char buf2[16];
  memset(buf1, 0xaf, sizeof(buf1));
  memset(buf2, 0xaf, sizeof(buf2));
  ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
  auto rng = QuicheRandom::GetInstance();
  rng->RandBytes(buf1, sizeof(buf1));
  EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
// Two consecutive 64-bit draws should differ (theoretical 2^-64 flake rate).
TEST(QuicheRandom, RandUint64) {
  auto rng = QuicheRandom::GetInstance();
  uint64_t value1 = rng->RandUint64();
  uint64_t value2 = rng->RandUint64();
  EXPECT_NE(value1, value2);
}
// Same overwrite check as RandBytes, for the insecure (PRNG) variant.
TEST(QuicheRandom, InsecureRandBytes) {
  unsigned char buf1[16];
  unsigned char buf2[16];
  memset(buf1, 0xaf, sizeof(buf1));
  memset(buf2, 0xaf, sizeof(buf2));
  ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
  auto rng = QuicheRandom::GetInstance();
  rng->InsecureRandBytes(buf1, sizeof(buf1));
  EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
// Two consecutive insecure 64-bit draws should differ.
TEST(QuicheRandom, InsecureRandUint64) {
  auto rng = QuicheRandom::GetInstance();
  uint64_t value1 = rng->InsecureRandUint64();
  uint64_t value2 = rng->InsecureRandUint64();
  EXPECT_NE(value1, value2);
}
}
} | uint64_t InsecureRandUint64() override;
};
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
RAND_bytes(reinterpret_cast<uint8_t*>(data), len);
} | TEST(QuicheRandom, InsecureRandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->InsecureRandUint64();
uint64_t value2 = rng->InsecureRandUint64();
EXPECT_NE(value1, value2);
} |
#include "quiche/quic/core/io/socket.h"
#include <cerrno>
#include <climits>
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/io/socket_internal.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_logging.h"
#if defined(_WIN32)
#include "quiche/quic/core/io/socket_win.inc"
#else
#include "quiche/quic/core/io/socket_posix.inc"
#endif
namespace quic::socket_api {
namespace {
// Calls ::accept() on listening socket `fd` and converts the peer's
// sockaddr into a QuicSocketAddress. Returns the accept error or the
// address-validation error on failure.
absl::StatusOr<AcceptResult> AcceptInternal(SocketFd fd) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  sockaddr_storage peer_addr;
  PlatformSocklen peer_addr_len = sizeof(peer_addr);
  SocketFd connection_socket = SyscallAccept(
      fd, reinterpret_cast<struct sockaddr*>(&peer_addr), &peer_addr_len);
  if (connection_socket == kInvalidSocketFd) {
    absl::Status status = LastSocketOperationError("::accept()");
    QUICHE_DVLOG(1) << "Failed to accept connection from socket " << fd
                    << " with error: " << status;
    return status;
  }
  absl::StatusOr<QuicSocketAddress> peer_address =
      ValidateAndConvertAddress(peer_addr, peer_addr_len);
  if (!peer_address.ok()) {
    // Bug fix: previously the accepted socket leaked on this path; close it
    // before surfacing the address-conversion error.
    if (!Close(connection_socket).ok()) {
      QUICHE_LOG_FIRST_N(ERROR, 100)
          << "Failed to close socket " << connection_socket
          << " after invalid peer address on acceptance.";
    }
    return peer_address.status();
  }
  return AcceptResult{connection_socket, *peer_address};
}
// Sets an integer-valued socket option via ::setsockopt(), logging at
// verbose level and returning the translated error on failure.
absl::Status SetSockOptInt(SocketFd fd, int level, int option, int value) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  const int rc = SyscallSetsockopt(fd, level, option, &value, sizeof(value));
  if (rc < 0) {
    absl::Status status = LastSocketOperationError("::setsockopt()");
    QUICHE_DVLOG(1) << "Failed to set socket " << fd << " option " << option
                    << " to " << value << " with error: " << status;
    return status;
  }
  return absl::OkStatus();
}
}
// Configures the kernel receive buffer (SO_RCVBUF) for `fd`.
absl::Status SetReceiveBufferSize(SocketFd fd, QuicByteCount size) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK_LE(size, QuicByteCount{INT_MAX});
  const int buffer_bytes = static_cast<int>(size);
  return SetSockOptInt(fd, SOL_SOCKET, SO_RCVBUF, buffer_bytes);
}
// Configures the kernel send buffer (SO_SNDBUF) for `fd`.
absl::Status SetSendBufferSize(SocketFd fd, QuicByteCount size) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK_LE(size, QuicByteCount{INT_MAX});
  const int buffer_bytes = static_cast<int>(size);
  return SetSockOptInt(fd, SOL_SOCKET, SO_SNDBUF, buffer_bytes);
}
// Connects `fd` to `peer_address` via ::connect(). EINPROGRESS is passed to
// LastSocketOperationError as an expected errno -- presumably so an
// in-progress asynchronous connect on a non-blocking socket is surfaced as
// a non-fatal status; confirm against LastSocketOperationError's contract.
absl::Status Connect(SocketFd fd, const QuicSocketAddress& peer_address) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK(peer_address.IsInitialized());
  sockaddr_storage addr = peer_address.generic_address();
  PlatformSocklen addrlen = GetAddrlen(peer_address.host().address_family());
  int connect_result =
      SyscallConnect(fd, reinterpret_cast<sockaddr*>(&addr), addrlen);
  if (connect_result >= 0) {
    return absl::OkStatus();
  } else {
    absl::Status status =
        LastSocketOperationError("::connect()",
                                 {EINPROGRESS});
    QUICHE_DVLOG(1) << "Failed to connect socket " << fd
                    << " to address: " << peer_address.ToString()
                    << " with error: " << status;
    return status;
  }
}
// Fetches the socket's pending SO_ERROR via getsockopt(). Returns OkStatus
// when no error is pending, the translated pending error when one is, or
// the getsockopt() failure itself.
absl::Status GetSocketError(SocketFd fd) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  int socket_error = 0;
  PlatformSocklen len = sizeof(socket_error);
  int sockopt_result =
      SyscallGetsockopt(fd, SOL_SOCKET, SO_ERROR, &socket_error, &len);
  if (sockopt_result >= 0) {
    if (socket_error == 0) {
      return absl::OkStatus();
    } else {
      return ToStatus(socket_error, "SO_ERROR");
    }
  } else {
    absl::Status status = LastSocketOperationError("::getsockopt()");
    QUICHE_LOG_FIRST_N(ERROR, 100)
        << "Failed to get socket error information from socket " << fd
        << " with error: " << status;
    return status;
  }
}
// Binds `fd` to `address` via ::bind(), logging and returning the
// translated error on failure.
absl::Status Bind(SocketFd fd, const QuicSocketAddress& address) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK(address.IsInitialized());
  sockaddr_storage storage = address.generic_address();
  const PlatformSocklen storage_len =
      GetAddrlen(address.host().address_family());
  if (SyscallBind(fd, reinterpret_cast<sockaddr*>(&storage), storage_len) <
      0) {
    absl::Status status = LastSocketOperationError("::bind()");
    QUICHE_DVLOG(1) << "Failed to bind socket " << fd
                    << " to address: " << address.ToString()
                    << " with error: " << status;
    return status;
  }
  return absl::OkStatus();
}
// Returns the socket's locally-bound address via ::getsockname(), converted
// to a QuicSocketAddress.
absl::StatusOr<QuicSocketAddress> GetSocketAddress(SocketFd fd) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  sockaddr_storage addr;
  PlatformSocklen addr_len = sizeof(addr);
  int result =
      SyscallGetsockname(fd, reinterpret_cast<sockaddr*>(&addr), &addr_len);
  if (result >= 0) {
    return ValidateAndConvertAddress(addr, addr_len);
  } else {
    absl::Status status = LastSocketOperationError("::getsockname()");
    QUICHE_DVLOG(1) << "Failed to get socket " << fd
                    << " name with error: " << status;
    return status;
  }
}
// Marks `fd` as a passive (listening) socket with the given backlog.
absl::Status Listen(SocketFd fd, int backlog) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK_GT(backlog, 0);
  if (SyscallListen(fd, backlog) < 0) {
    absl::Status status = LastSocketOperationError("::listen()");
    QUICHE_DVLOG(1) << "Failed to mark socket: " << fd
                    << " to listen with error :" << status;
    return status;
  }
  return absl::OkStatus();
}
// Accepts a pending connection on listening socket `fd`. When `blocking` is
// false, the accepted socket is returned in non-blocking mode: atomically
// via accept4(SOCK_NONBLOCK) where available, otherwise with a follow-up
// SetSocketBlocking(false) call (closing the socket if that fails).
absl::StatusOr<AcceptResult> Accept(SocketFd fd, bool blocking) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
#if defined(HAS_ACCEPT4)
  if (!blocking) {
    // Fast path: accept and mark non-blocking in a single syscall.
    return AcceptWithFlags(fd, SOCK_NONBLOCK);
  }
#endif
  absl::StatusOr<AcceptResult> accept_result = AcceptInternal(fd);
  if (!accept_result.ok() || blocking) {
    return accept_result;
  }
#if !defined(__linux__) || !defined(SOCK_NONBLOCK)
  // No atomic non-blocking accept available: flip the accepted socket to
  // non-blocking explicitly.
  absl::Status set_non_blocking_result =
      SetSocketBlocking(accept_result->fd, false);
  if (!set_non_blocking_result.ok()) {
    // NOTE(review): this logs the listening `fd`; the accepted
    // `accept_result->fd` may be the more useful value here -- confirm.
    QUICHE_LOG_FIRST_N(ERROR, 100)
        << "Failed to set socket " << fd << " as non-blocking on acceptance.";
    if (!Close(accept_result->fd).ok()) {
      QUICHE_LOG_FIRST_N(ERROR, 100)
          << "Failed to close socket " << accept_result->fd
          << " after error setting non-blocking on acceptance.";
    }
    return set_non_blocking_result;
  }
#endif
  return accept_result;
}
// Reads up to buffer.size() bytes from `fd` into `buffer` (MSG_PEEK when
// `peek` is set, leaving the data queued for the next read). Returns the
// prefix of `buffer` that was filled; an empty span indicates a zero-byte
// read.
absl::StatusOr<absl::Span<char>> Receive(SocketFd fd, absl::Span<char> buffer,
                                         bool peek) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK(!buffer.empty());
  PlatformSsizeT num_read = SyscallRecv(fd, buffer.data(), buffer.size(),
                                        peek ? MSG_PEEK : 0);
  // Defensive check: a result larger than the buffer would mean a broken
  // syscall wrapper; surface it rather than returning an overrun span.
  if (num_read > 0 && static_cast<size_t>(num_read) > buffer.size()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Received more bytes (" << num_read << ") from socket " << fd
        << " than buffer size (" << buffer.size() << ").";
    return absl::OutOfRangeError(
        "::recv(): Received more bytes than buffer size.");
  } else if (num_read >= 0) {
    return buffer.subspan(0, num_read);
  } else {
    absl::Status status = LastSocketOperationError("::recv()");
    QUICHE_DVLOG(1) << "Failed to receive from socket: " << fd
                    << " with error: " << status;
    return status;
  }
}
// Writes `buffer` to connected socket `fd` via ::send(). On success returns
// the UNSENT suffix of `buffer` (empty when everything was written), so
// callers can retry with the remainder.
absl::StatusOr<absl::string_view> Send(SocketFd fd, absl::string_view buffer) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK(!buffer.empty());
  PlatformSsizeT num_sent =
      SyscallSend(fd, buffer.data(), buffer.size(), 0);
  // Defensive check against a syscall wrapper reporting more bytes sent
  // than were supplied.
  if (num_sent > 0 && static_cast<size_t>(num_sent) > buffer.size()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Sent more bytes (" << num_sent << ") to socket " << fd
        << " than buffer size (" << buffer.size() << ").";
    return absl::OutOfRangeError("::send(): Sent more bytes than buffer size.");
  } else if (num_sent >= 0) {
    return buffer.substr(num_sent);
  } else {
    absl::Status status = LastSocketOperationError("::send()");
    QUICHE_DVLOG(1) << "Failed to send to socket: " << fd
                    << " with error: " << status;
    return status;
  }
}
// Writes `buffer` to `peer_address` via ::sendto() (no prior connect()
// required). On success returns the UNSENT suffix of `buffer` (empty when
// everything was written).
absl::StatusOr<absl::string_view> SendTo(SocketFd fd,
                                         const QuicSocketAddress& peer_address,
                                         absl::string_view buffer) {
  QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
  QUICHE_DCHECK(peer_address.IsInitialized());
  QUICHE_DCHECK(!buffer.empty());
  sockaddr_storage addr = peer_address.generic_address();
  PlatformSocklen addrlen = GetAddrlen(peer_address.host().address_family());
  PlatformSsizeT num_sent =
      SyscallSendTo(fd, buffer.data(), buffer.size(),
                    0, reinterpret_cast<sockaddr*>(&addr), addrlen);
  // Defensive check against a syscall wrapper reporting more bytes sent
  // than were supplied.
  if (num_sent > 0 && static_cast<size_t>(num_sent) > buffer.size()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Sent more bytes (" << num_sent << ") to socket " << fd
        << " to address: " << peer_address.ToString() << " than buffer size ("
        << buffer.size() << ").";
    return absl::OutOfRangeError(
        "::sendto(): Sent more bytes than buffer size.");
  } else if (num_sent >= 0) {
    return buffer.substr(num_sent);
  } else {
    absl::Status status = LastSocketOperationError("::sendto()");
    QUICHE_DVLOG(1) << "Failed to send to socket: " << fd
                    << " to address: " << peer_address.ToString()
                    << " with error: " << status;
    return status;
  }
}
} | #include "quiche/quic/core/io/socket.h"
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/test_tools/test_ip_packets.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_test_loopback.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic::test {
namespace {
using quiche::test::QuicheTest;
using quiche::test::StatusIs;
using testing::Lt;
using testing::SizeIs;
// Creates a loopback-family socket of the given protocol for use in tests,
// CHECK-failing if creation fails (tests assume this cannot fail).
SocketFd CreateTestSocket(socket_api::SocketProtocol protocol,
                          bool blocking = true) {
  absl::StatusOr<SocketFd> socket = socket_api::CreateSocket(
      quiche::TestLoopback().address_family(), protocol, blocking);
  if (socket.ok()) {
    return socket.value();
  } else {
    QUICHE_CHECK(false);
    return kInvalidSocketFd;
  }
}
// Creates a raw IP socket for the requested family (or the default loopback
// family for IP_UNSPEC). Returns kInvalidSocketFd when the environment does
// not permit raw sockets, so callers can GTEST_SKIP().
SocketFd CreateTestRawSocket(
    bool blocking = true,
    IpAddressFamily address_family = IpAddressFamily::IP_UNSPEC) {
  absl::StatusOr<SocketFd> socket;
  switch (address_family) {
    case IpAddressFamily::IP_V4:
      socket = socket_api::CreateSocket(
          quiche::TestLoopback4().address_family(),
          socket_api::SocketProtocol::kRawIp, blocking);
      break;
    case IpAddressFamily::IP_V6:
      socket = socket_api::CreateSocket(
          quiche::TestLoopback6().address_family(),
          socket_api::SocketProtocol::kRawIp, blocking);
      break;
    case IpAddressFamily::IP_UNSPEC:
      socket = socket_api::CreateSocket(quiche::TestLoopback().address_family(),
                                        socket_api::SocketProtocol::kRawIp,
                                        blocking);
      break;
  }
  if (socket.ok()) {
    return socket.value();
  } else {
    // Raw sockets typically require elevated privileges; treat these errors
    // as "unsupported in this environment" rather than test failures.
    QUICHE_CHECK(absl::IsPermissionDenied(socket.status()) ||
                 absl::IsNotFound(socket.status()));
    return kInvalidSocketFd;
  }
}
// A UDP socket can be created for the loopback address family and closed.
TEST(SocketTest, CreateAndCloseSocket) {
  QuicIpAddress localhost_address = quiche::TestLoopback();
  absl::StatusOr<SocketFd> created_socket = socket_api::CreateSocket(
      localhost_address.address_family(), socket_api::SocketProtocol::kUdp);
  QUICHE_EXPECT_OK(created_socket.status());
  QUICHE_EXPECT_OK(socket_api::Close(created_socket.value()));
}
// Raw IP sockets usually need privileges; tolerate kPermissionDenied.
TEST(SocketTest, CreateAndCloseRawSocket) {
  QuicIpAddress localhost_address = quiche::TestLoopback();
  absl::StatusOr<SocketFd> created_socket = socket_api::CreateSocket(
      localhost_address.address_family(), socket_api::SocketProtocol::kRawIp);
  if (!created_socket.ok()) {
    EXPECT_THAT(created_socket.status(),
                StatusIs(absl::StatusCode::kPermissionDenied));
    return;
  }
  QUICHE_EXPECT_OK(socket_api::Close(created_socket.value()));
}
// A blocking socket can be switched to non-blocking mode.
TEST(SocketTest, SetSocketBlocking) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     true);
  QUICHE_EXPECT_OK(socket_api::SetSocketBlocking(socket, false));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// SO_RCVBUF can be set on a fresh socket.
TEST(SocketTest, SetReceiveBufferSize) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     true);
  QUICHE_EXPECT_OK(socket_api::SetReceiveBufferSize(socket, 100));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// SO_SNDBUF can be set on a fresh socket.
TEST(SocketTest, SetSendBufferSize) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     true);
  QUICHE_EXPECT_OK(socket_api::SetSendBufferSize(socket, 100));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// IP-header-included mode can be enabled on an IPv4 raw socket (skipped
// when raw sockets are unavailable).
TEST(SocketTest, SetIpHeaderIncludedForRaw) {
  SocketFd socket =
      CreateTestRawSocket(true, IpAddressFamily::IP_V4);
  if (socket == kInvalidSocketFd) {
    GTEST_SKIP();
  }
  QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
      socket, IpAddressFamily::IP_V4, true));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Same as above for an IPv6 raw socket.
TEST(SocketTest, SetIpHeaderIncludedForRawV6) {
  SocketFd socket =
      CreateTestRawSocket(true, IpAddressFamily::IP_V6);
  if (socket == kInvalidSocketFd) {
    GTEST_SKIP();
  }
  QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
      socket, IpAddressFamily::IP_V6, true));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Requesting IP-header-included mode on a non-raw (UDP) socket is rejected.
TEST(SocketTest, SetIpHeaderIncludedForUdp) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     true);
  EXPECT_THAT(socket_api::SetIpHeaderIncluded(socket, IpAddressFamily::IP_V4,
                                              true),
              StatusIs(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(socket_api::SetIpHeaderIncluded(socket, IpAddressFamily::IP_V6,
                                              true),
              StatusIs(absl::StatusCode::kInvalidArgument));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// UDP connect() to loopback succeeds (no handshake is performed).
TEST(SocketTest, Connect) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  QUICHE_EXPECT_OK(socket_api::Connect(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// A fresh socket has no pending SO_ERROR.
TEST(SocketTest, GetSocketError) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     true);
  absl::Status error = socket_api::GetSocketError(socket);
  QUICHE_EXPECT_OK(error);
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Binding to an ephemeral loopback port succeeds.
TEST(SocketTest, Bind) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  QUICHE_EXPECT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// GetSocketAddress reflects the loopback address the socket was bound to.
TEST(SocketTest, GetSocketAddress) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  QUICHE_ASSERT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  absl::StatusOr<QuicSocketAddress> address =
      socket_api::GetSocketAddress(socket);
  QUICHE_EXPECT_OK(address);
  EXPECT_TRUE(address.value().IsInitialized());
  EXPECT_EQ(address.value().host(), quiche::TestLoopback());
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// A bound TCP socket can be marked as listening.
TEST(SocketTest, Listen) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kTcp);
  QUICHE_ASSERT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  QUICHE_EXPECT_OK(socket_api::Listen(socket, 5));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Accept on a non-blocking listener with no pending connection returns
// kUnavailable instead of blocking.
TEST(SocketTest, Accept) {
  SocketFd socket =
      CreateTestSocket(socket_api::SocketProtocol::kTcp, false);
  QUICHE_ASSERT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  QUICHE_ASSERT_OK(socket_api::Listen(socket, 5));
  absl::StatusOr<socket_api::AcceptResult> result = socket_api::Accept(socket);
  EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Receive on a non-blocking socket with nothing queued returns kUnavailable.
TEST(SocketTest, Receive) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     false);
  QUICHE_ASSERT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  std::string buffer(100, 0);
  absl::StatusOr<absl::Span<char>> result =
      socket_api::Receive(socket, absl::MakeSpan(buffer));
  EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Same as Receive, exercising the MSG_PEEK path.
TEST(SocketTest, Peek) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
                                     false);
  QUICHE_ASSERT_OK(socket_api::Bind(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  std::string buffer(100, 0);
  absl::StatusOr<absl::Span<char>> result =
      socket_api::Receive(socket, absl::MakeSpan(buffer), true);
  EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// Send on a connected UDP socket returns an unsent remainder shorter than
// the 4-byte input (i.e. at least one byte was written).
TEST(SocketTest, Send) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  QUICHE_ASSERT_OK(socket_api::Connect(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  char buffer[] = {12, 34, 56, 78};
  absl::StatusOr<absl::string_view> result =
      socket_api::Send(socket, absl::string_view(buffer, sizeof(buffer)));
  QUICHE_ASSERT_OK(result.status());
  EXPECT_THAT(result.value(), SizeIs(Lt(4)));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// SendTo works without a prior connect().
TEST(SocketTest, SendTo) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  char buffer[] = {12, 34, 56, 78};
  absl::StatusOr<absl::string_view> result = socket_api::SendTo(
      socket, QuicSocketAddress(quiche::TestLoopback(), 57290),
      absl::string_view(buffer, sizeof(buffer)));
  QUICHE_ASSERT_OK(result.status());
  EXPECT_THAT(result.value(), SizeIs(Lt(4)));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// SendTo may target a different address than the connected peer.
TEST(SocketTest, SendToWithConnection) {
  SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
  QUICHE_ASSERT_OK(socket_api::Connect(
      socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
  char buffer[] = {12, 34, 56, 78};
  absl::StatusOr<absl::string_view> result = socket_api::SendTo(
      socket, QuicSocketAddress(quiche::TestLoopback(), 50495),
      absl::string_view(buffer, sizeof(buffer)));
  QUICHE_ASSERT_OK(result.status());
  EXPECT_THAT(result.value(), SizeIs(Lt(4)));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// A raw socket without IP-header-included mode sends a caller-built UDP
// payload with a kernel-supplied IP header.
TEST(SocketTest, SendToForRaw) {
  SocketFd socket = CreateTestRawSocket(true);
  if (socket == kInvalidSocketFd) {
    GTEST_SKIP();
  }
  QuicIpAddress localhost_address = quiche::TestLoopback();
  QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
      socket, localhost_address.address_family(),
      false));
  QuicSocketAddress client_address(localhost_address, 53368);
  QuicSocketAddress server_address(localhost_address, 56362);
  std::string packet = CreateUdpPacket(client_address, server_address, "foo");
  absl::StatusOr<absl::string_view> result = socket_api::SendTo(
      socket, QuicSocketAddress(localhost_address, 56362), packet);
  QUICHE_ASSERT_OK(result.status());
  EXPECT_THAT(result.value(), SizeIs(Lt(packet.size())));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
// With IP-header-included mode enabled, the caller supplies the IP header.
TEST(SocketTest, SendToForRawWithIpHeader) {
  SocketFd socket = CreateTestRawSocket(true);
  if (socket == kInvalidSocketFd) {
    GTEST_SKIP();
  }
  QuicIpAddress localhost_address = quiche::TestLoopback();
  QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
      socket, localhost_address.address_family(), true));
  QuicSocketAddress client_address(localhost_address, 53368);
  QuicSocketAddress server_address(localhost_address, 56362);
  std::string packet =
      CreateIpPacket(client_address.host(), server_address.host(),
                     CreateUdpPacket(client_address, server_address, "foo"));
  absl::StatusOr<absl::string_view> result = socket_api::SendTo(
      socket, QuicSocketAddress(localhost_address, 56362), packet);
  QUICHE_ASSERT_OK(result.status());
  EXPECT_THAT(result.value(), SizeIs(Lt(packet.size())));
  QUICHE_EXPECT_OK(socket_api::Close(socket));
}
}
} | absl::Status Connect(SocketFd fd, const QuicSocketAddress& peer_address) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(peer_address.IsInitialized());
sockaddr_storage addr = peer_address.generic_address();
PlatformSocklen addrlen = GetAddrlen(peer_address.host().address_family());
int connect_result =
SyscallConnect(fd, reinterpret_cast<sockaddr*>(&addr), addrlen);
if (connect_result >= 0) {
return absl::OkStatus();
} else {
absl::Status status =
LastSocketOperationError("::connect()",
{EINPROGRESS});
QUICHE_DVLOG(1) << "Failed to connect socket " << fd
<< " to address: " << peer_address.ToString()
<< " with error: " << status;
return status;
}
} | TEST(SocketTest, Connect) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_EXPECT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Send) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_ASSERT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
char buffer[] = {12, 34, 56, 78};
absl::StatusOr<absl::string_view> result =
socket_api::Send(socket, absl::string_view(buffer, sizeof(buffer)));
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(4)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SendToWithConnection) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_ASSERT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
char buffer[] = {12, 34, 56, 78};
absl::StatusOr<absl::string_view> result = socket_api::SendTo(
socket, QuicSocketAddress(quiche::TestLoopback(), 50495),
absl::string_view(buffer, sizeof(buffer)));
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(4)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
} |
#include "tensorflow/core/graph/subgraph.h"
#include <algorithm>
#include <deque>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace subgraph {
namespace {
typedef std::unordered_map<StringPiece, Node*, StringPieceHasher> NameIndex;
// Rewrites `g` so each fed tensor is produced by a new feed node (built by
// the matching PruneRewrite) instead of its original producer:
//  - the feed node is registered in `name_index`,
//  - every data edge out of the fed output is moved onto the feed node,
//  - for Placeholder/PlaceholderV2 producers, outgoing control edges are
//    moved as well,
//  - the fed tensor's base dtype is appended to `out_feed_types`.
// Returns NotFound/InvalidArgument when a feed endpoint cannot be resolved.
Status FeedInputs(
    Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
    NameIndex* name_index, DataTypeVector* out_feed_types) {
  out_feed_types->clear();
  out_feed_types->reserve(feed_rewrites.size());
  for (size_t i = 0; i < feed_rewrites.size(); ++i) {
    const string& t = feed_rewrites[i]->endpoint_name();
    TensorId id(ParseTensorName(t));
    auto iter = name_index->find(id.first);
    if (iter == name_index->end()) {
      return errors::NotFound("FeedInputs: unable to find feed output ", t);
    }
    Node* n = iter->second;
    DCHECK_EQ(n->name(), id.first);
    if (id.second >= n->num_outputs()) {
      return errors::InvalidArgument(
          "FeedInputs: ", t, " should have output index < ", n->num_outputs());
    }
    Node* feed_node;
    TF_RETURN_IF_ERROR(
        feed_rewrites[i]->AddNode(g, {n, id.second}, &feed_node));
    // Make the new node discoverable by later rewrites and anchor it to the
    // graph's SOURCE node.
    (*name_index)[feed_node->name()] = feed_node;
    g->AddControlEdge(g->source_node(), feed_node, true);
    // Collect edges to divert: all data edges from the fed output, plus
    // outgoing control edges when the producer is a placeholder.
    std::vector<const Edge*> to_remove;
    for (const Edge* e : n->out_edges()) {
      if (e->src_output() == id.second) {
        to_remove.emplace_back(e);
      } else if (e->src_output() == Graph::kControlSlot &&
                 (n->type_string() == "Placeholder" ||
                  n->type_string() == "PlaceholderV2")) {
        to_remove.emplace_back(e);
      }
    }
    // Re-point each collected edge at the feed node, then drop the original.
    for (const Edge* e : to_remove) {
      if (e->src_output() == id.second) {
        g->AddEdge(feed_node, 0, e->dst(), e->dst_input());
      } else {
        CHECK_EQ(Graph::kControlSlot, e->src_output());
        g->AddControlEdge(feed_node, e->dst(), true);
      }
      g->RemoveEdge(e);
    }
    out_feed_types->push_back(BaseType(n->output_type(id.second)));
  }
  return absl::OkStatus();
}
// For each fetch endpoint "name[:port]", appends a fetch node (built by the
// matching PruneRewrite) that consumes that output, wires it to the SINK
// node so pruning keeps it, and records the node and its base dtype in the
// out-parameters. Fails with NotFound/InvalidArgument for unresolvable or
// out-of-range endpoints.
Status FetchOutputs(
    Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& fetch_rewrites,
    NameIndex* name_index, std::vector<Node*>* out_fetch_nodes,
    DataTypeVector* out_fetch_types) {
  out_fetch_nodes->clear();
  out_fetch_nodes->reserve(fetch_rewrites.size());
  for (size_t i = 0; i < fetch_rewrites.size(); ++i) {
    const string& t = fetch_rewrites[i]->endpoint_name();
    TensorId id(ParseTensorName(t));
    auto iter = name_index->find(id.first);
    if (iter == name_index->end()) {
      return errors::NotFound("FetchOutputs node ", t, ": not found");
    }
    Node* n = iter->second;
    DCHECK_EQ(n->name(), id.first);
    VLOG(2) << "Found fetch node for " << t;
    // A node with no outputs can only be run as a target, not fetched.
    if (n->num_outputs() == 0) {
      return errors::InvalidArgument(
          "Tried to fetch data for '", t,
          "', which produces no output. To run to a node but not fetch any "
          "data, pass '",
          t,
          "' as an argument to the 'target_node_names' argument of the "
          "Session::Run API.");
    } else if (id.second >= n->num_outputs()) {
      return errors::InvalidArgument("FetchOutputs ", t,
                                     ": output index too large, must be < ",
                                     n->num_outputs());
    }
    Node* fetch_node;
    TF_RETURN_IF_ERROR(
        fetch_rewrites[i]->AddNode(g, {n, id.second}, &fetch_node));
    // Register the fetch node and anchor it to SINK so later pruning
    // treats it as reachable.
    (*name_index)[fetch_node->name()] = fetch_node;
    g->AddControlEdge(fetch_node, g->sink_node(), true);
    out_fetch_nodes->push_back(fetch_node);
    out_fetch_types->push_back(BaseType(n->output_type(id.second)));
  }
  return absl::OkStatus();
}
bool AddNodeToTargets(const string& node_or_tensor_name,
const NameIndex& name_index,
std::unordered_set<const Node*>* targets) {
TensorId id = ParseTensorName(node_or_tensor_name);
auto iter = name_index.find(id.first);
if (iter == name_index.end()) {
return false;
}
const Node* n = iter->second;
CHECK_EQ(n->name(), id.first);
targets->insert(n);
return true;
}
// Prunes `g` down to the nodes needed to compute `fetch_nodes` and
// `target_nodes`, then reattaches dangling nodes to SOURCE/SINK.
// Returns NotFound listing every target name missing from `name_index`.
Status PruneForTargets(Graph* g, const NameIndex& name_index,
                       const std::vector<Node*>& fetch_nodes,
                       const absl::Span<const string>& target_nodes) {
  string not_found;
  std::unordered_set<const Node*> targets;
  for (Node* n : fetch_nodes) {
    if (!AddNodeToTargets(n->name(), name_index, &targets)) {
      strings::StrAppend(&not_found, n->name(), " ");
    }
  }
  for (const string& s : target_nodes) {
    if (!AddNodeToTargets(s, name_index, &targets)) {
      strings::StrAppend(&not_found, s, " ");
    }
  }
  if (!not_found.empty()) {
    return errors::NotFound("PruneForTargets: Some target nodes not found: ",
                            not_found);
  }
  PruneForReverseReachability(g, std::move(targets));
  // Pruning may leave nodes without in/out edges; reconnect them to the
  // special SOURCE and SINK nodes.
  FixupSourceAndSinkEdges(g);
  return absl::OkStatus();
}
}
// Creates a "_Arg" node (function-calling-convention feed) to replace
// `feed_tensor`, carrying the fed output's base dtype and this feed's
// positional index, pinned to the device captured in device_info().
Status ArgFeedRewrite::AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
                               Node** out_node) {
  TF_RETURN_IF_ERROR(
      NodeBuilder(strings::StrCat("_arg_", feed_tensor.node->name(), "_",
                                  feed_tensor.index, "_", arg_index_),
                  "_Arg")
          .Attr("T", BaseType(feed_tensor.node->output_type(feed_tensor.index)))
          .Attr("index", arg_index_)
          .Finalize(g, out_node, true));
  (*out_node)->set_assigned_device_name(device_info().name());
  return absl::OkStatus();
}
// Creates a client-terminated "_Recv" node that delivers the fed value for
// `feed_tensor`, keyed by the feed's endpoint name. Send and recv device
// are both set to device_info(), since the value comes from the client.
Status RecvFeedRewrite::AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
                                Node** out_node) {
  TF_RETURN_IF_ERROR(
      NodeBuilder(strings::StrCat("_recv_", feed_tensor.node->name(), "_",
                                  feed_tensor.index),
                  "_Recv")
          .Attr("tensor_type",
                BaseType(feed_tensor.node->output_type(feed_tensor.index)))
          .Attr("tensor_name", endpoint_name())
          .Attr("send_device", device_info().name())
          .Attr("recv_device", device_info().name())
          .Attr("send_device_incarnation",
                static_cast<int64_t>(device_info().incarnation()))
          .Attr("client_terminated", true)
          .Finalize(g, out_node, true));
  (*out_node)->set_assigned_device_name(device_info().name());
  return absl::OkStatus();
}
// Creates a "_Retval" node (function-calling-convention fetch) consuming
// `fetch_tensor`, carrying its base dtype and this fetch's positional
// index, pinned to the device captured in device_info().
Status RetvalFetchRewrite::AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
                                   Node** out_node) {
  TF_RETURN_IF_ERROR(
      NodeBuilder(strings::StrCat("_retval_", fetch_tensor.node->name(), "_",
                                  fetch_tensor.index, "_", retval_index_),
                  "_Retval")
          .Input(fetch_tensor.node, fetch_tensor.index)
          .Attr("T",
                BaseType(fetch_tensor.node->output_type(fetch_tensor.index)))
          .Attr("index", retval_index_)
          .Finalize(g, out_node, true));
  (*out_node)->set_assigned_device_name(device_info().name());
  return absl::OkStatus();
}
// Creates a client-terminated "_Send" node consuming `fetch_tensor`, keyed
// by the fetch's endpoint name, so the fetched value can be delivered back
// to the client via the rendezvous.
Status SendFetchRewrite::AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
                                 Node** out_node) {
  TF_RETURN_IF_ERROR(
      NodeBuilder(strings::StrCat("_send_", fetch_tensor.node->name(), "_",
                                  fetch_tensor.index),
                  "_Send")
          .Input(fetch_tensor.node, fetch_tensor.index)
          .Attr("tensor_name", endpoint_name())
          .Attr("send_device", device_info().name())
          .Attr("recv_device", device_info().name())
          .Attr("send_device_incarnation",
                static_cast<int64_t>(device_info().incarnation()))
          .Attr("client_terminated", true)
          .Finalize(g, out_node, true));
  (*out_node)->set_assigned_device_name(device_info().name());
  return absl::OkStatus();
}
// Convenience overload: wraps each fed/fetched endpoint in the appropriate
// PruneRewrite -- _Arg/_Retval when `use_function_convention` is set,
// client-terminated _Recv/_Send otherwise -- then delegates to the
// PruneRewrite-based overload.
Status RewriteGraphForExecution(
    Graph* g, const absl::Span<const string>& fed_outputs,
    const absl::Span<const string>& fetch_outputs,
    const absl::Span<const string>& target_node_names,
    const DeviceAttributes& device_info, bool use_function_convention,
    RewriteGraphMetadata* out_metadata) {
  std::vector<std::unique_ptr<PruneRewrite>> feed_rewrites;
  feed_rewrites.reserve(fed_outputs.size());
  if (use_function_convention) {
    for (size_t i = 0; i < fed_outputs.size(); ++i) {
      feed_rewrites.emplace_back(new ArgFeedRewrite(
          &fed_outputs[i], &device_info, static_cast<int32>(i)));
    }
  } else {
    for (const string& fed_output : fed_outputs) {
      feed_rewrites.emplace_back(
          new RecvFeedRewrite(&fed_output, &device_info));
    }
  }
  std::vector<std::unique_ptr<PruneRewrite>> fetch_rewrites;
  fetch_rewrites.reserve(fetch_outputs.size());
  if (use_function_convention) {
    for (size_t i = 0; i < fetch_outputs.size(); ++i) {
      fetch_rewrites.emplace_back(new RetvalFetchRewrite(
          &fetch_outputs[i], &device_info, static_cast<int32>(i)));
    }
  } else {
    for (const string& fetch_output : fetch_outputs) {
      fetch_rewrites.emplace_back(
          new SendFetchRewrite(&fetch_output, &device_info));
    }
  }
  return RewriteGraphForExecution(g, feed_rewrites, fetch_rewrites,
                                  target_node_names, out_metadata);
}
namespace {
template <typename StringContainer>
std::vector<string> ConvertToVector(StringContainer field) {
return std::vector<string>(field.begin(), field.end());
}
}
// Rewrites `g` in place for a Run() call:
//  1. validates that no endpoint is fed twice or both fed and fetched,
//  2. builds a name -> Node index over the current graph,
//  3. splices in feed nodes (FeedInputs) and fetch nodes (FetchOutputs),
//  4. prunes the graph to what the fetches/targets need (PruneForTargets).
// The feed/fetch dtypes are reported through `out_metadata`.
Status RewriteGraphForExecution(
    Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
    const std::vector<std::unique_ptr<PruneRewrite>>& fetch_rewrites,
    const absl::Span<const string>& target_node_names,
    RewriteGraphMetadata* out_metadata) {
  if (fetch_rewrites.empty() && target_node_names.empty()) {
    return errors::InvalidArgument(
        "Must specify at least one target to fetch or execute.");
  }
  // Reject duplicate feeds and endpoints that are both fed and fetched.
  std::unordered_set<string> endpoints;
  for (const auto& feed_rewrite : feed_rewrites) {
    auto result = endpoints.insert(feed_rewrite->endpoint_name());
    if (!result.second) {
      return errors::InvalidArgument("Endpoint \"",
                                     feed_rewrite->endpoint_name(),
                                     "\" fed more than once.");
    }
  }
  for (const auto& fetch_rewrite : fetch_rewrites) {
    if (endpoints.count(fetch_rewrite->endpoint_name()) > 0) {
      return errors::InvalidArgument(fetch_rewrite->endpoint_name(),
                                     " is both fed and fetched.");
    }
  }
  NameIndex name_index;
  name_index.reserve(g->num_nodes());
  for (Node* n : g->nodes()) {
    name_index[n->name()] = n;
  }
  if (!feed_rewrites.empty()) {
    TF_RETURN_IF_ERROR(
        FeedInputs(g, feed_rewrites, &name_index, &out_metadata->feed_types));
  }
  std::vector<Node*> fetch_nodes;
  if (!fetch_rewrites.empty()) {
    TF_RETURN_IF_ERROR(FetchOutputs(g, fetch_rewrites, &name_index,
                                    &fetch_nodes, &out_metadata->fetch_types));
  }
  if (!fetch_nodes.empty() || !target_node_names.empty()) {
    TF_RETURN_IF_ERROR(
        PruneForTargets(g, name_index, fetch_nodes, target_node_names));
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/graph/subgraph.h"
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
// Fixture: builds a graph from a GraphDef text proto and runs
// subgraph::RewriteGraphForExecution() on a copy, exposing helpers to
// inspect the rewritten graph.
class SubgraphTest : public ::testing::Test {
 protected:
  SubgraphTest() : g_(new Graph(OpRegistry::Global())) {
    // All _Send/_Recv nodes created by the rewrite are bound to this device.
    device_info_.set_name("/job:a/replica:0/task:0/cpu:0");
    device_info_.set_device_type(DeviceType(DEVICE_CPU).type());
    device_info_.set_incarnation(0);
  }

  ~SubgraphTest() override {}

  // Parses `gdef_ascii` as a GraphDef text proto and loads it into g_.
  void ExpectOK(const string& gdef_ascii) {
    CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef_));
    GraphConstructorOptions opts;
    TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef_, g_.get()));
  }

  // Returns the node named `name`, or nullptr if absent.
  Node* FindNode(const string& name) {
    for (Node* n : g_->nodes()) {
      if (n->name() == name) return n;
    }
    return nullptr;
  }

  bool HasNode(const string& name) { return FindNode(name) != nullptr; }

  // Asserts the graph holds exactly the comma-separated op nodes in `nodes`
  // (order-independent) and that every _Send/_Recv among them was assigned
  // to device_info_.
  void ExpectNodes(const string& nodes) {
    int count = 0;
    std::vector<string> actual_nodes;
    for (Node* n : g_->nodes()) {
      if (n->IsOp()) {
        count++;
        actual_nodes.push_back(n->name());
      }
    }
    std::sort(actual_nodes.begin(), actual_nodes.end());
    LOG(INFO) << "Nodes present: " << absl::StrJoin(actual_nodes, " ");
    std::vector<string> expected_nodes = str_util::Split(nodes, ',');
    std::sort(expected_nodes.begin(), expected_nodes.end());
    for (const string& s : expected_nodes) {
      Node* n = FindNode(s);
      EXPECT_TRUE(n != nullptr) << s;
      if (n->type_string() == "_Send" || n->type_string() == "_Recv") {
        EXPECT_EQ(device_info_.name(), n->assigned_device_name()) << s;
      }
    }
    EXPECT_TRUE(actual_nodes.size() == expected_nodes.size())
        << "\nActual: " << absl::StrJoin(actual_nodes, ",")
        << "\nExpected: " << absl::StrJoin(expected_nodes, ",");
  }

  // True iff a (possibly control) edge src:src_out -> dst:dst_in exists.
  bool HasEdge(const string& src, int src_out, const string& dst, int dst_in) {
    for (const Edge* e : g_->edges()) {
      if (e->src()->name() == src && e->src_output() == src_out &&
          e->dst()->name() == dst && e->dst_input() == dst_in)
        return true;
    }
    return false;
  }
  bool HasControlEdge(const string& src, const string& dst) {
    return HasEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
  }

  // Runs RewriteGraphForExecution on a copy of g_ with the comma-separated
  // feed/fetch/target lists. On success replaces g_ with the rewritten graph
  // and returns "OK"; on failure returns the error status string.
  string Subgraph(const string& fed_str, const string& fetch_str,
                  const string& targets_str,
                  bool use_function_convention = false) {
    Graph* subgraph = new Graph(OpRegistry::Global());
    CopyGraph(*g_, subgraph);
    std::vector<string> fed =
        str_util::Split(fed_str, ',', str_util::SkipEmpty());
    std::vector<string> fetch =
        str_util::Split(fetch_str, ',', str_util::SkipEmpty());
    std::vector<string> targets =
        str_util::Split(targets_str, ',', str_util::SkipEmpty());
    subgraph::RewriteGraphMetadata metadata;
    Status s = subgraph::RewriteGraphForExecution(
        subgraph, fed, fetch, targets, device_info_, use_function_convention,
        &metadata);
    if (!s.ok()) {
      delete subgraph;
      return s.ToString();
    }
    // The rewrite must record one type per feed and per fetch.
    EXPECT_EQ(fed.size(), metadata.feed_types.size());
    EXPECT_EQ(fetch.size(), metadata.fetch_types.size());
    g_.reset(subgraph);
    return "OK";
  }

  Graph* graph() { return g_.get(); }

 private:
  GraphDef gdef_;             // last parsed GraphDef
  std::unique_ptr<Graph> g_;  // graph under test (replaced by Subgraph())
  DeviceAttributes device_info_;
};
// Trivial test ops: parameter/input producers, a unary relu, a binary mul.
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestRelu").Input("i: float").Output("o: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");

// Pruning to a single target keeps only its transitive fan-in.
TEST_F(SubgraphTest, Targets1) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("", "", "t1"));
  ExpectNodes("W1,input,t1");
}

// Multiple targets: the union of their fan-ins is kept.
TEST_F(SubgraphTest, Targets2) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: 'W1' input: 'input:1' }"
      "node { name: 't2' op: 'TestMul' input: 'W2' input: 't1' }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("", "", "t2,t3_a"));
  ExpectNodes("W1,W2,input,t1,t2,t3_a");
}

// Feeding input:1 replaces that output with a _Recv node.
TEST_F(SubgraphTest, FedOutputs1) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("input:1", "", "t2"));
  ExpectNodes("W1,W2,_recv_input_1,t1,t2");
}

// Same feed under the function convention: an _Arg node is created instead.
TEST_F(SubgraphTest, FedOutputs1_FunctionConvention) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK",
            Subgraph("input:1", "", "t2", true ));
  ExpectNodes("W1,W2,_arg_input_1_0,t1,t2");
}

// Feeding a ref-typed output: the feed node produces the base (non-ref) type.
TEST_F(SubgraphTest, FedRefNode) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
  EXPECT_EQ("OK", Subgraph("W1:0", "", "t1"));
  ExpectNodes("_recv_W1_0,W2,t1");
  Node* n = FindNode("_recv_W1_0");
  EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}

// Ref-typed feed under the function convention.
TEST_F(SubgraphTest, FedRefNode_FunctionConvention) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
  EXPECT_EQ("OK",
            Subgraph("W1:0", "", "t1", true ));
  ExpectNodes("_arg_W1_0_0,W2,t1");
  Node* n = FindNode("_arg_W1_0_0");
  EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}

// Multiple feeds: producers upstream of the fed tensors are pruned away,
// and _Arg indices follow feed position (t1 -> 1, W2 -> 2).
TEST_F(SubgraphTest, FedOutputs2_FunctionConvention) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("input:1,t1,W2", "", "t2",
                           true ));
  ExpectNodes("_arg_t1_0_1,_arg_W2_0_2,t2");
}
// Each fetched tensor gets a _Send node attached to it.
TEST_F(SubgraphTest, FetchOutputs1) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("", "W2,input:1,t1,t2", "t2"));
  ExpectNodes(
      "W1,W2,input,t1,t2,_send_W2_0,_send_input_1,_send_t1_0,_send_t2_0");
}

// Function convention: fetches become _Retval nodes, indexed by position.
TEST_F(SubgraphTest, FetchOutputs1_FunctionConvention) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("", "W2,input:1,t1,t2", "t2",
                           true ));
  ExpectNodes(
      "W1,W2,input,t1,t2,_retval_W2_0_0,_retval_input_1_1,_retval_t1_0_2,_"
      "retval_t2_0_3");
}

// A fetch beyond the target keeps its own fan-in alive (t3_a survives even
// though only t2 is targeted).
TEST_F(SubgraphTest, FetchOutputs2) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK", Subgraph("", "t3_a", "t2"));
  ExpectNodes("W1,W2,input,t1,t2,t3_a,_send_t3_a_0");
}

// Same as above with the function convention.
TEST_F(SubgraphTest, FetchOutputs2_FunctionConvention) {
  ExpectOK(
      "node { name: 'W1' op: 'TestParams' }"
      "node { name: 'W2' op: 'TestParams' }"
      "node { name: 'input' op: 'TestInput' }"
      "node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
      "node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
      "node { name: 't3_a' op: 'TestRelu' input: 't2' }"
      "node { name: 't3_b' op: 'TestRelu' input: 't2' }");
  EXPECT_EQ("OK",
            Subgraph("", "t3_a", "t2", true ));
  ExpectNodes("W1,W2,input,t1,t2,t3_a,_retval_t3_a_0_0");
}

// Feeding mid-chain (c) splits the chain: b is fetched from the upstream
// half, the downstream half consumes the _Recv for c.
TEST_F(SubgraphTest, ChainOfFools) {
  ExpectOK(
      "node { name: 'a' op: 'TestParams' }"
      "node { name: 'b' op: 'TestRelu' input: 'a'}"
      "node { name: 'c' op: 'TestRelu' input: 'b'}"
      "node { name: 'd' op: 'TestRelu' input: 'c'}"
      "node { name: 'e' op: 'TestRelu' input: 'd'}"
      "node { name: 'f' op: 'TestRelu' input: 'e'}");
  EXPECT_EQ("OK", Subgraph("c:0", "b:0,e:0", ""));
  ExpectNodes("a,b,_send_b_0,_recv_c_0,d,e,_send_e_0");
  EXPECT_TRUE(HasEdge("a", 0, "b", 0));
  EXPECT_TRUE(HasEdge("b", 0, "_send_b_0", 0));
  EXPECT_TRUE(HasEdge("_recv_c_0", 0, "d", 0));
  EXPECT_TRUE(HasEdge("d", 0, "e", 0));
  EXPECT_TRUE(HasEdge("e", 0, "_send_e_0", 0));
}
// Returns true iff `substr` occurs in `base`; also records a gtest failure
// (showing both strings) when it does not.
static bool HasSubstr(StringPiece base, StringPiece substr) {
  const bool contains = absl::StrContains(base, substr);
  EXPECT_TRUE(contains) << base << ", expected substring " << substr;
  return contains;
}
// Invalid feed/fetch/target combinations must surface descriptive errors.
TEST_F(SubgraphTest, Errors) {
  ExpectOK(
      "node { name: 'a' op: 'TestParams' }"
      "node { name: 'b' op: 'TestRelu' input: 'a'}"
      "node { name: 'c' op: 'TestRelu' input: 'b'}"
      "node { name: 'd' op: 'TestRelu' input: 'c'}"
      "node { name: 'e' op: 'TestRelu' input: 'd'}"
      "node { name: 'f' op: 'TestRelu' input: 'e'}");
  // Same tensor fed and fetched.
  EXPECT_TRUE(
      HasSubstr(Subgraph("c:0", "b:0,c:0", ""), "both fed and fetched"));
  // Unknown feed / fetch / target names.
  EXPECT_TRUE(HasSubstr(Subgraph("foo:0", "c:0", ""), "unable to find"));
  EXPECT_TRUE(HasSubstr(Subgraph("", "foo:0", ""), "not found"));
  EXPECT_TRUE(HasSubstr(Subgraph("", "", "foo"), "not found"));
  // Nothing to fetch or execute at all.
  EXPECT_TRUE(HasSubstr(Subgraph("", "", ""), "at least one target"));
}
// Ops used by the benchmark chain graph.
REGISTER_OP("In").Output("o: float");
REGISTER_OP("Op").Input("i: float").Output("o: float");

// Benchmarks RewriteGraphForExecution on a linear chain of state.range(0)
// nodes, feeding the node 1000 steps before the end (when the chain is long
// enough) and targeting the last node.
void BM_SubgraphHelper(::testing::benchmark::State& state,
                       bool use_function_convention) {
  const int num_nodes = state.range(0);
  DeviceAttributes device_info;
  device_info.set_name("/job:a/replica:0/task:0/cpu:0");
  device_info.set_device_type(DeviceType(DEVICE_CPU).type());
  device_info.set_incarnation(0);
  Graph g(OpRegistry::Global());
  {
    // Build the chain N0 -> N1 -> ... -> N{num_nodes-1}.
    GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
    Node* last_node = nullptr;
    for (int i = 0; i < num_nodes; i++) {
      string name = strings::StrCat("N", i);
      if (i > 0) {
        last_node = ops::UnaryOp("Op", last_node, b.opts().WithName(name));
      } else {
        last_node = ops::SourceOp("In", b.opts().WithName(name));
      }
    }
    TF_CHECK_OK(GraphDefBuilderToGraph(b, &g));
  }
  std::vector<string> fed;
  if (num_nodes > 1000) {
    fed.push_back(strings::StrCat("N", num_nodes - 1000));
  }
  std::vector<string> fetch;
  std::vector<string> targets = {strings::StrCat("N", num_nodes - 1)};
  for (auto s : state) {
    // Rewrite a fresh copy each iteration; the copy is part of the timing.
    Graph* subgraph = new Graph(OpRegistry::Global());
    CopyGraph(g, subgraph);
    subgraph::RewriteGraphMetadata metadata;
    TF_CHECK_OK(subgraph::RewriteGraphForExecution(
        subgraph, fed, fetch, targets, device_info, use_function_convention,
        &metadata));
    delete subgraph;
  }
}
// Benchmark the legacy _Send/_Recv rewrite convention.
void BM_Subgraph(::testing::benchmark::State& state) {
  BM_SubgraphHelper(state, /*use_function_convention=*/false );
}
// Benchmark the _Arg/_Retval (function-convention) rewrite.
void BM_SubgraphFunctionConvention(::testing::benchmark::State& state) {
  BM_SubgraphHelper(state, /*use_function_convention=*/true );
}
BENCHMARK(BM_Subgraph)->Arg(100)->Arg(1000)->Arg(10000)->Arg(100000);
BENCHMARK(BM_SubgraphFunctionConvention)
    ->Arg(100)
    ->Arg(1000)
    ->Arg(10000)
    ->Arg(100000);
}
} | Status FeedInputs(
    Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
    NameIndex* name_index, DataTypeVector* out_feed_types) {
  out_feed_types->clear();
  out_feed_types->reserve(feed_rewrites.size());
  for (size_t i = 0; i < feed_rewrites.size(); ++i) {
    // Resolve the fed endpoint "name:index" to a node/output pair.
    const string& t = feed_rewrites[i]->endpoint_name();
    TensorId id(ParseTensorName(t));
    auto iter = name_index->find(id.first);
    if (iter == name_index->end()) {
      return errors::NotFound("FeedInputs: unable to find feed output ", t);
    }
    Node* n = iter->second;
    DCHECK_EQ(n->name(), id.first);
    if (id.second >= n->num_outputs()) {
      return errors::InvalidArgument(
          "FeedInputs: ", t, " should have output index < ", n->num_outputs());
    }
    // Create the replacement feed node and register it under its name.
    Node* feed_node;
    TF_RETURN_IF_ERROR(
        feed_rewrites[i]->AddNode(g, {n, id.second}, &feed_node));
    (*name_index)[feed_node->name()] = feed_node;
    // Anchor the feed node to the source so later pruning keeps it alive.
    g->AddControlEdge(g->source_node(), feed_node, true);
    // Collect the edges that must be re-pointed at the feed node: all data
    // edges out of the fed output, plus control edges out of placeholders
    // (a fed placeholder is replaced entirely by the feed node).
    std::vector<const Edge*> to_remove;
    for (const Edge* e : n->out_edges()) {
      if (e->src_output() == id.second) {
        to_remove.emplace_back(e);
      } else if (e->src_output() == Graph::kControlSlot &&
                 (n->type_string() == "Placeholder" ||
                  n->type_string() == "PlaceholderV2")) {
        to_remove.emplace_back(e);
      }
    }
    // Move each collected edge so it now originates at the feed node.
    for (const Edge* e : to_remove) {
      if (e->src_output() == id.second) {
        g->AddEdge(feed_node, 0, e->dst(), e->dst_input());
      } else {
        CHECK_EQ(Graph::kControlSlot, e->src_output());
        g->AddControlEdge(feed_node, e->dst(), true);
      }
      g->RemoveEdge(e);
    }
    // Record the (base, non-ref) element type of the fed tensor.
    out_feed_types->push_back(BaseType(n->output_type(id.second)));
  }
  return absl::OkStatus();
}
} | TEST_F(SubgraphTest, FedOutputs1) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("input:1", "", "t2"));
ExpectNodes("W1,W2,_recv_input_1,t1,t2");
}
TEST_F(SubgraphTest, FedOutputs1_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK",
Subgraph("input:1", "", "t2", true ));
ExpectNodes("W1,W2,_arg_input_1_0,t1,t2");
}
TEST_F(SubgraphTest, FedRefNode) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
EXPECT_EQ("OK", Subgraph("W1:0", "", "t1"));
ExpectNodes("_recv_W1_0,W2,t1");
Node* n = FindNode("_recv_W1_0");
EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}
TEST_F(SubgraphTest, FedRefNode_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
EXPECT_EQ("OK",
Subgraph("W1:0", "", "t1", true ));
ExpectNodes("_arg_W1_0_0,W2,t1");
Node* n = FindNode("_arg_W1_0_0");
EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}
TEST_F(SubgraphTest, FedOutputs2_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("input:1,t1,W2", "", "t2",
true ));
ExpectNodes("_arg_t1_0_1,_arg_W2_0_2,t2");
} |
#include "tensorstore/progress.h"
#include <ostream>
namespace tensorstore {
// Two ReadProgress values are equal when every counter matches.
bool operator==(const ReadProgress& a, const ReadProgress& b) {
  if (a.total_elements != b.total_elements) return false;
  return a.copied_elements == b.copied_elements;
}
// Inequality is defined in terms of equality.
bool operator!=(const ReadProgress& a, const ReadProgress& b) {
  const bool equal = (a == b);
  return !equal;
}
// Formats as "{ total_elements=T, copied_elements=C }".
std::ostream& operator<<(std::ostream& os, const ReadProgress& a) {
  os << "{ total_elements=" << a.total_elements;
  os << ", copied_elements=" << a.copied_elements;
  os << " }";
  return os;
}
// Two WriteProgress values are equal when every counter matches.
bool operator==(const WriteProgress& a, const WriteProgress& b) {
  if (a.total_elements != b.total_elements) return false;
  if (a.copied_elements != b.copied_elements) return false;
  return a.committed_elements == b.committed_elements;
}
// Inequality is defined in terms of equality.
bool operator!=(const WriteProgress& a, const WriteProgress& b) {
  const bool equal = (a == b);
  return !equal;
}
// Formats as "{ total_elements=T, copied_elements=C, committed_elements=M }".
std::ostream& operator<<(std::ostream& os, const WriteProgress& a) {
  os << "{ total_elements=" << a.total_elements;
  os << ", copied_elements=" << a.copied_elements;
  os << ", committed_elements=" << a.committed_elements;
  os << " }";
  return os;
}
// Two CopyProgress values are equal when every counter matches.
bool operator==(const CopyProgress& a, const CopyProgress& b) {
  if (a.total_elements != b.total_elements) return false;
  if (a.read_elements != b.read_elements) return false;
  if (a.copied_elements != b.copied_elements) return false;
  return a.committed_elements == b.committed_elements;
}
// Inequality is defined in terms of equality.
bool operator!=(const CopyProgress& a, const CopyProgress& b) {
  const bool equal = (a == b);
  return !equal;
}
// Formats as "{ total_elements=T, read_elements=R, copied_elements=C,
// committed_elements=M }".
std::ostream& operator<<(std::ostream& os, const CopyProgress& a) {
  os << "{ total_elements=" << a.total_elements;
  os << ", read_elements=" << a.read_elements;
  os << ", copied_elements=" << a.copied_elements;
  os << ", committed_elements=" << a.committed_elements;
  os << " }";
  return os;
}
} | #include "tensorstore/progress.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::CopyProgress;
using ::tensorstore::ReadProgress;
using ::tensorstore::WriteProgress;
// Equality/inequality across all counter combinations.
TEST(ReadProgressTest, Comparison) {
  ReadProgress a{1, 1};
  ReadProgress b{2, 2};
  ReadProgress c{2, 1};
  EXPECT_EQ(a, a);
  EXPECT_EQ(b, b);
  EXPECT_EQ(c, c);
  EXPECT_NE(a, b);
  EXPECT_NE(a, c);
  EXPECT_NE(b, c);
}
// Stream formatting of ReadProgress.
TEST(ReadProgressTest, Ostream) {
  EXPECT_EQ("{ total_elements=2, copied_elements=1 }",
            tensorstore::StrCat(ReadProgress{2, 1}));
}
// Equality/inequality across all counter combinations.
TEST(WriteProgressTest, Comparison) {
  WriteProgress a{1, 1, 1};
  WriteProgress b{2, 2, 2};
  WriteProgress c{2, 1, 1};
  WriteProgress d{2, 1, 2};
  EXPECT_EQ(a, a);
  EXPECT_EQ(b, b);
  EXPECT_EQ(c, c);
  EXPECT_EQ(d, d);
  EXPECT_NE(a, b);
  EXPECT_NE(a, c);
  EXPECT_NE(a, d);
  EXPECT_NE(b, d);
  EXPECT_NE(b, c);
  EXPECT_NE(c, d);
}
// Stream formatting of WriteProgress.
TEST(WriteProgressTest, Ostream) {
  EXPECT_EQ("{ total_elements=3, copied_elements=2, committed_elements=1 }",
            tensorstore::StrCat(WriteProgress{3, 2, 1}));
}
// Equality/inequality: each counter individually distinguishes values.
TEST(CopyProgressTest, Comparison) {
  CopyProgress a{1, 1, 1, 1};
  CopyProgress b{2, 1, 1, 1};
  CopyProgress c{1, 2, 1, 1};
  CopyProgress d{1, 1, 2, 1};
  CopyProgress e{1, 1, 1, 2};
  EXPECT_EQ(a, a);
  EXPECT_EQ(b, b);
  EXPECT_EQ(c, c);
  EXPECT_EQ(d, d);
  EXPECT_EQ(e, e);
  EXPECT_NE(a, b);
  EXPECT_NE(a, c);
  EXPECT_NE(a, d);
  EXPECT_NE(a, e);
}
// Stream formatting of CopyProgress.
TEST(CopyProgressTest, Ostream) {
  EXPECT_EQ(
      "{ total_elements=4, read_elements=3, copied_elements=2, "
      "committed_elements=1 }",
      tensorstore::StrCat(CopyProgress{4, 3, 2, 1}));
}
} | std::ostream& operator<<(std::ostream& os, const WriteProgress& a) {
return os << "{ total_elements=" << a.total_elements
<< ", copied_elements=" << a.copied_elements
<< ", committed_elements=" << a.committed_elements << " }";
} | TEST(WriteProgressTest, Ostream) {
EXPECT_EQ("{ total_elements=3, copied_elements=2, committed_elements=1 }",
tensorstore::StrCat(WriteProgress{3, 2, 1}));
} |
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/tensor_slice_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_gather {
namespace {
constexpr int kOperandTensor = 0;
constexpr int kStartIndicesTensor = 1;
constexpr int kOutputTensor = 0;
using TfLiteIntArrayUniquePtr =
std::unique_ptr<TfLiteIntArray, decltype(&TfLiteIntArrayFree)>;
// Clamps `starting_index` so that a slice of extents `slice_sizes` taken at
// that index stays inside `operand_shape`.
//
// Per the StableHLO gather specification each start-index component is
// clamped to [0, operand_dim - slice_size]. The previous implementation only
// applied the upper bound, so a negative start index (indices are signed
// int32/int64) would produce a negative flat offset and an out-of-bounds
// read; the lower clamp fixes that.
//
// Returns kTfLiteError when the ranks of the operand, the index vector and
// the slice-size list disagree; kTfLiteOk otherwise.
template <typename IndexType>
TfLiteStatus ClipStartingIndex(const RuntimeShape& operand_shape,
                               const int64_t* slice_sizes, int num_slice_sizes,
                               Index<IndexType>& starting_index) {
  if (operand_shape.DimensionsCount() != starting_index.size() ||
      operand_shape.DimensionsCount() != num_slice_sizes) {
    return kTfLiteError;
  }
  for (int dim = 0; dim < starting_index.size(); ++dim) {
    // Largest start position that still keeps the slice in bounds.
    const int64_t upper_bound = operand_shape.Dims(dim) - slice_sizes[dim];
    starting_index[dim] = static_cast<IndexType>(std::max<int64_t>(
        0, std::min<int64_t>((int64_t)starting_index[dim], upper_bound)));
  }
  return kTfLiteOk;
}
// Returns the slice sizes with the collapsed dimensions removed; the result
// has num_slice_sizes - num_collapsed_slice_dims entries, in original order.
static std::vector<int64_t> GetCollapsedSliceShape(
    const int64_t* slice_sizes, int num_slice_sizes,
    const int64_t* collapsed_slice_dims, int num_collapsed_slice_dims) {
  std::vector<int64_t> result;
  result.reserve(num_slice_sizes - num_collapsed_slice_dims);
  for (int dim = 0; dim < num_slice_sizes; ++dim) {
    const bool collapsed =
        ArrayContains(collapsed_slice_dims, num_collapsed_slice_dims, dim);
    if (!collapsed) {
      result.push_back(slice_sizes[dim]);
    }
  }
  return result;
}
// Computes the gather output shape: offset dimensions take their extents
// from the collapsed slice shape; every other (batch) dimension takes its
// extent from start_indices, skipping over index_vector_dim.
static TfLiteIntArrayUniquePtr GetResultShape(
    int64_t result_rank, const TfLiteStablehloGatherParams* data,
    const RuntimeShape& start_indices_shape) {
  TfLiteIntArrayUniquePtr result = TfLiteIntArrayUniquePtr(
      TfLiteIntArrayCreate(result_rank), &TfLiteIntArrayFree);
  int result_ctr = 0;
  std::vector<int64_t> collapsed_slice_shape = GetCollapsedSliceShape(
      data->slice_sizes, data->num_slice_sizes, data->collapsed_slice_dims,
      data->num_collapsed_slice_dims);
  // Parallel cursors into the collapsed slice shape and start_indices shape.
  int64_t slice_shape_ctr = 0;
  int64_t start_indices_shape_ctr = 0;
  for (int64_t dim = 0; dim < result_rank; dim++) {
    if (ArrayContains(data->offset_dims, data->num_offset_dims, dim)) {
      result->data[result_ctr] = collapsed_slice_shape[slice_shape_ctr];
      slice_shape_ctr++;
    } else {
      // Batch dimension: the index-vector dimension of start_indices does
      // not contribute to the result shape, so step over it.
      if (start_indices_shape_ctr == data->index_vector_dim) {
        start_indices_shape_ctr++;
      }
      result->data[result_ctr] =
          start_indices_shape.Dims(start_indices_shape_ctr);
      start_indices_shape_ctr++;
    }
    result_ctr++;
  }
  return result;
}
// Splits `result_index` into its batch coordinates (dimensions not listed in
// `offset_dims`) and its offset coordinates (dimensions listed in
// `offset_dims`), preserving dimension order. Returns kTfLiteError if either
// output index would be overrun.
template <typename IndexType>
TfLiteStatus SetBatchAndOffsetIndices(const Index<IndexType>& result_index,
                                      const int64_t* offset_dims,
                                      int num_offset_dims,
                                      Index<IndexType>& batch_index,
                                      Index<IndexType>& offset_index) {
  int next_offset = 0;
  int next_batch = 0;
  for (int dim = 0; dim < result_index.size(); ++dim) {
    const bool is_offset_dim =
        ArrayContains(offset_dims, num_offset_dims, dim);
    if (is_offset_dim) {
      if (next_offset >= num_offset_dims) {
        return kTfLiteError;
      }
      offset_index[next_offset] = result_index[dim];
      ++next_offset;
    } else {
      if (next_batch >= result_index.size() - num_offset_dims) {
        return kTfLiteError;
      }
      batch_index[next_batch] = result_index[dim];
      ++next_batch;
    }
  }
  return kTfLiteOk;
}
// Reference gather evaluation, fully typed on the index and data element
// types. Iterates over every coordinate of the result tensor and copies the
// corresponding operand element.
template <typename IndexType, typename DataType>
TfLiteStatus EvalWithTypes(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* operand;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kOperandTensor, &operand));
  int operand_rank = operand->dims->size;
  RuntimeShape operand_shape = GetTensorShape(operand);
  const TfLiteTensor* start_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
                                          &start_indices));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const TfLiteStablehloGatherParams* data =
      reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
  RuntimeShape start_indices_shape = GetTensorShape(start_indices);
  int result_rank = output->dims->size;
  RuntimeShape result_runtime_shape(result_rank, output->dims->data);
  // Multi-dimensional cursor over the result tensor, starting at the origin.
  Index<IndexType> result_index = Index<IndexType>(result_rank, 0);
  int64_t num_batch_dims = result_rank - data->num_offset_dims;
  Index<IndexType> batch_index(num_batch_dims);
  Index<IndexType> offset_index(data->num_offset_dims);
  do {
    // Split the result coordinate into batch and offset parts.
    TF_LITE_ENSURE_OK(
        context, SetBatchAndOffsetIndices(result_index, data->offset_dims,
                                          data->num_offset_dims, batch_index,
                                          offset_index));
    // Read the start-index vector for this batch element.
    Index<IndexType> starting_index_vector =
        ReadIndexVector(start_indices, start_indices_shape, batch_index,
                        data->index_vector_dim);
    // Scatter it into operand-rank coordinates via start_index_map, then
    // clip so the slice stays inside the operand.
    Index<IndexType> final_starting_index;
    ScatterIndex(starting_index_vector, data->start_index_map,
                 data->num_start_index_map, operand_rank,
                 &final_starting_index);
    TF_LITE_ENSURE_OK(
        context,
        ClipStartingIndex(operand_shape, data->slice_sizes,
                          data->num_slice_sizes, final_starting_index));
    // Re-insert the collapsed dimensions (as zeros) into the offset part and
    // add it to the clipped start to get the operand coordinate to read.
    Index<IndexType> full_offset_index;
    ExpandDims(offset_index, data->collapsed_slice_dims,
               data->num_collapsed_slice_dims, &full_offset_index);
    Index<IndexType> operand_lookup_index =
        AddIndices(final_starting_index, full_offset_index);
    // Copy the looked-up operand element into the result.
    const DataType* operand_data = GetTensorData<DataType>(operand);
    IndexType flat_operand_index =
        TensorIndexToFlat(operand_lookup_index.data(),
                          operand_lookup_index.size(), GetTensorShape(operand));
    DataType looked_up_value = operand_data[flat_operand_index];
    DataType* result_data = GetTensorData<DataType>(output);
    IndexType flat_result_index = TensorIndexToFlat(
        result_index.data(), result_index.size(), GetTensorShape(output));
    result_data[flat_result_index] = looked_up_value;
  } while (NextIndex(result_rank, result_runtime_shape.DimsData(),
                     result_index.data()));
  return TfLiteStatus::kTfLiteOk;
}
// Second-stage dispatch: the index type is already fixed; select the operand
// data type and run the typed evaluation.
template <typename IndexType>
TfLiteStatus EvalWithIndexType(TfLiteContext* context, TfLiteNode* node,
                               TfLiteType index_type, TfLiteType data_type) {
  switch (data_type) {
    case kTfLiteFloat16:
      return EvalWithTypes<IndexType, Eigen::half>(context, node);
    case kTfLiteFloat32:
      return EvalWithTypes<IndexType, float>(context, node);
    case kTfLiteFloat64:
      return EvalWithTypes<IndexType, double>(context, node);
    case kTfLiteInt8:
      return EvalWithTypes<IndexType, int8_t>(context, node);
    case kTfLiteInt16:
      return EvalWithTypes<IndexType, int16_t>(context, node);
    case kTfLiteInt32:
      return EvalWithTypes<IndexType, int32_t>(context, node);
    case kTfLiteInt64:
      return EvalWithTypes<IndexType, int64_t>(context, node);
    case kTfLiteUInt8:
      return EvalWithTypes<IndexType, uint8_t>(context, node);
    case kTfLiteUInt16:
      return EvalWithTypes<IndexType, uint16_t>(context, node);
    case kTfLiteUInt32:
      return EvalWithTypes<IndexType, uint32_t>(context, node);
    case kTfLiteUInt64:
      return EvalWithTypes<IndexType, uint64_t>(context, node);
    default:
      // Unsupported data type for this kernel.
      TF_LITE_KERNEL_LOG(
          context, "(Index Type: %s, Data Type: %s) currently not supported.\n",
          TfLiteTypeGetName(index_type), TfLiteTypeGetName(data_type));
      return TfLiteStatus::kTfLiteError;
  }
}
}
// Kernel entry point: dispatches on the start-indices element type
// (int32/int64) and then, inside EvalWithIndexType, on the operand type.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* operand;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kOperandTensor, &operand));
  const TfLiteTensor* start_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
                                          &start_indices));
  const TfLiteType index_type = start_indices->type;
  const TfLiteType data_type = operand->type;
  switch (index_type) {
    case kTfLiteInt32:
      return EvalWithIndexType<int32_t>(context, node, index_type, data_type);
    case kTfLiteInt64:
      return EvalWithIndexType<int64_t>(context, node, index_type, data_type);
    default:
      TF_LITE_KERNEL_LOG(context,
                         "(Index Type: %s) currently not supported.\n",
                         TfLiteTypeGetName(index_type));
      return TfLiteStatus::kTfLiteError;
  }
}
// Validates input/output counts and the index element type, then resizes the
// output tensor to the shape implied by the gather attributes.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* operand;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kOperandTensor, &operand));
  const TfLiteTensor* start_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
                                          &start_indices));
  // Only int32/int64 start indices are supported; fail early in Prepare.
  TfLiteType index_type = start_indices->type;
  if (index_type != kTfLiteInt32 && index_type != kTfLiteInt64) {
    TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
                       TfLiteTypeGetName(index_type));
    return TfLiteStatus::kTfLiteError;
  }
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const TfLiteStablehloGatherParams* data =
      reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
  RuntimeShape start_indices_shape = GetTensorShape(start_indices);
  // NOTE(review): the result rank is taken from the output tensor as built by
  // the model; only the dimension extents are recomputed here.
  TfLiteIntArrayUniquePtr result_shape =
      GetResultShape(output->dims->size, data, start_indices_shape);
  // ResizeTensor takes ownership of the released TfLiteIntArray.
  TF_LITE_ENSURE_STATUS(
      context->ResizeTensor(context, output, result_shape.release()));
  return TfLiteStatus::kTfLiteOk;
}
}
// Returns the (process-lifetime) registration for the STABLEHLO_GATHER
// kernel: no init/free hooks, Prepare + Eval only.
TfLiteRegistration* Register_STABLEHLO_GATHER() {
  static TfLiteRegistration r = {nullptr, nullptr, stablehlo_gather::Prepare,
                                 stablehlo_gather::Eval};
  return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
// Builds a single STABLEHLO_GATHER op model from the given operand/indices
// tensors and gather attributes.
// NOTE(review): the output shape is hard-coded to {2, 3, 2, 2}, matching the
// parameterization used by all tests below.
class StablehloGatherOpModel : public SingleOpModel {
 public:
  StablehloGatherOpModel(const TensorData& input, const TensorData& indices,
                         const TfLiteStablehloGatherParams& params) {
    input_ = AddInput(input);
    indices_ = AddInput(indices);
    output_ = AddOutput(TensorData(input.type, {2, 3, 2, 2}));
    // Serialize the gather attributes into flatbuffer
    // StablehloGatherOptions for the op's builtin options.
    SetBuiltinOp(
        BuiltinOperator_STABLEHLO_GATHER,
        BuiltinOptions2_StablehloGatherOptions,
        CreateStablehloGatherOptions(
            builder_,
            builder_.CreateVector(
                std::vector(params.offset_dims,
                            params.offset_dims + params.num_offset_dims)),
            builder_.CreateVector(std::vector(
                params.collapsed_slice_dims,
                params.collapsed_slice_dims + params.num_collapsed_slice_dims)),
            builder_.CreateVector(std::vector(
                params.start_index_map,
                params.start_index_map + params.num_start_index_map)),
            params.index_vector_dim,
            builder_.CreateVector(
                std::vector(params.slice_sizes,
                            params.slice_sizes + params.num_slice_sizes)),
            params.indices_are_sorted)
            .Union());
    BuildInterpreter({GetShape(input_), GetShape(indices_)});
  }

  // Populates the operand tensor.
  template <typename T>
  void SetInput(std::initializer_list<T> data) {
    PopulateTensor<T>(input_, data);
  }
  // Populates the start_indices tensor.
  template <typename T>
  void SetIndices(std::initializer_list<T> data) {
    PopulateTensor<T>(indices_, data);
  }
  // Reads back the result tensor after Invoke().
  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }

 protected:
  int input_;    // operand tensor index
  int indices_;  // start_indices tensor index
  int output_;   // result tensor index
};
// Fix: suite was named "StablehloScatterOpTest" — a copy/paste slip from the
// scatter tests; these exercise STABLEHLO_GATHER.
TEST(StablehloGatherOpTest, GathersSlices) {
  // Aggregate initializer order assumed to match the
  // TfLiteStablehloGatherParams field order used by the model above:
  // offset_dims, num_offset_dims, collapsed_slice_dims,
  // num_collapsed_slice_dims, start_index_map, num_start_index_map,
  // index_vector_dim, slice_sizes, num_slice_sizes, indices_are_sorted
  // — TODO confirm against builtin_op_data.h.
  TfLiteStablehloGatherParams params = {
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      {1, 2, 2},
      3,
      false
  };
  StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
                               {TensorType_INT64, {2, 3, 2}}, params);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
                                        13, 14, 15, 16, 9, 10, 11, 12,
                                        11, 12, 13, 14, 17, 18, 19, 20};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Fix: suite renamed from "StablehloScatterOpTest" (copy/paste slip) — this
// tests STABLEHLO_GATHER. The final start index (9) is out of range for the
// operand and is expected to be clamped, so the expected output matches the
// in-range GathersSlices case.
TEST(StablehloGatherOpTest, ClipsStartingIndices) {
  TfLiteStablehloGatherParams params = {
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      {1, 2, 2},
      3,
      false
  };
  StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
                               {TensorType_INT64, {2, 3, 2}}, params);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
                                        13, 14, 15, 16, 9, 10, 11, 12,
                                        11, 12, 13, 14, 17, 18, 19, 20};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Fix: suite renamed from "StablehloScatterOpTest" (copy/paste slip) — this
// tests STABLEHLO_GATHER with a dynamic (-1, -1, 2) indices shape signature.
TEST(StablehloGatherOpTest, WorksWithDynamicShapes) {
  TfLiteStablehloGatherParams params = {
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      {1, 2, 2},
      3,
      false
  };
  // Only type, shape and shape_signature matter here; the remaining
  // TensorData fields are left at their defaults.
  TensorData indices_tensor = {TensorType_INT64,
                               {2, 3, 2},
                               0.0f,
                               0.0f,
                               0.0f,
                               0,
                               false,
                               {},
                               {},
                               0,
                               {},
                               {},
                               {},
                               {},
                               {{-1, -1, 2}}};
  StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}}, indices_tensor,
                               params);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
                                        13, 14, 15, 16, 9, 10, 11, 12,
                                        11, 12, 13, 14, 17, 18, 19, 20};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
}
} | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  // Validates the op signature (2 inputs, 1 output), checks the index dtype,
  // and resizes the output tensor to the gather's result shape.
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* operand;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kOperandTensor, &operand));
  const TfLiteTensor* start_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
                                          &start_indices));
  TfLiteType index_type = start_indices->type;
  // Only 32/64-bit integer indices are supported.
  if (index_type != kTfLiteInt32 && index_type != kTfLiteInt64) {
    TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
                       TfLiteTypeGetName(index_type));
    return TfLiteStatus::kTfLiteError;
  }
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const TfLiteStablehloGatherParams* data =
      reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
  RuntimeShape start_indices_shape = GetTensorShape(start_indices);
  // GetResultShape combines the gather params with the indices shape;
  // ResizeTensor takes ownership of the released TfLiteIntArray.
  TfLiteIntArrayUniquePtr result_shape =
      GetResultShape(output->dims->size, data, start_indices_shape);
  TF_LITE_ENSURE_STATUS(
      context->ResizeTensor(context, output, result_shape.release()));
  return TfLiteStatus::kTfLiteOk;
}
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
{TensorType_INT64, {2, 3, 2}}, params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// NOTE(review): the two tests below duplicate earlier tests in this chunk
// (extraction artifact), and the suite name "StablehloScatterOpTest" is a
// copy/paste slip — they exercise STABLEHLO_GATHER.
TEST(StablehloScatterOpTest, ClipsStartingIndices) {
  TfLiteStablehloGatherParams params = {
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      {1, 2, 2},
      3,
      false
  };
  StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
                               {TensorType_INT64, {2, 3, 2}}, params);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  // Last start index (9) is out of range and expected to be clamped.
  model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
                                        13, 14, 15, 16, 9, 10, 11, 12,
                                        11, 12, 13, 14, 17, 18, 19, 20};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, WorksWithDynamicShapes) {
  TfLiteStablehloGatherParams params = {
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      {1, 2, 2},
      3,
      false
  };
  // Indices tensor declared with a dynamic (-1, -1, 2) shape signature.
  TensorData indices_tensor = {TensorType_INT64,
                               {2, 3, 2},
                               0.0f,
                               0.0f,
                               0.0f,
                               0,
                               false,
                               {},
                               {},
                               0,
                               {},
                               {},
                               {},
                               {},
                               {{-1, -1, 2}}};
  StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}}, indices_tensor,
                               params);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
                                        13, 14, 15, 16, 9, 10, 11, 12,
                                        11, 12, 13, 14, 17, 18, 19, 20};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kTempFileSuffix[] = ".tmp";
// Writes to a uniquely named temp file ("<filename>__<unique>.tmp") and then
// renames it over `filename`, so readers never observe a partial file.
// `nonatomically_write` performs the actual write into the temp path.
absl::Status AtomicallyWrite(
    absl::string_view filename, tsl::Env* env,
    absl::FunctionRef<absl::Status(const std::string&)> nonatomically_write) {
  std::string uncommitted_filename = absl::StrCat(filename, "__");
  if (!env->CreateUniqueFileName(&uncommitted_filename, kTempFileSuffix)) {
    return tsl::errors::Internal("Failed to write file ", filename,
                                 ": Unable to create temporary files.");
  }
  TF_RETURN_IF_ERROR(nonatomically_write(uncommitted_filename));
  // The rename is the commit point (assumed atomic on the target filesystem).
  absl::Status status =
      env->RenameFile(uncommitted_filename, std::string(filename));
  if (!status.ok()) {
    return tsl::errors::Internal("Failed to rename file: ", status.ToString(),
                                 ". Source: ", uncommitted_filename,
                                 ", destination: ", filename);
  }
  return status;
}
}
// Atomically writes `str` to `filename`: the payload goes to a temporary
// file first and is renamed into place by AtomicallyWrite.
absl::Status AtomicallyWriteStringToFile(absl::string_view filename,
                                         absl::string_view str, tsl::Env* env) {
  // The write callback simply forwards WriteStringToFile's status.
  auto write_fn = [env, str](const std::string& temp_filename) -> absl::Status {
    return WriteStringToFile(env, temp_filename, str);
  };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(AtomicallyWrite(filename, env, write_fn),
                                  "Requested to write string: ", str);
  return absl::OkStatus();
}
// Atomically writes `proto` in binary wire format to `filename` via a
// temp-file-plus-rename commit.
absl::Status AtomicallyWriteBinaryProto(absl::string_view filename,
                                        const tsl::protobuf::Message& proto,
                                        tsl::Env* env) {
  // Forward WriteBinaryProto's status straight through.
  auto write_fn =
      [env, &proto](const std::string& temp_filename) -> absl::Status {
    return WriteBinaryProto(env, temp_filename, proto);
  };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      AtomicallyWrite(filename, env, write_fn),
      "Requested to write proto in binary format: ", proto.DebugString());
  return absl::OkStatus();
}
// Atomically writes `proto` in text format to `filename` via a
// temp-file-plus-rename commit.
absl::Status AtomicallyWriteTextProto(absl::string_view filename,
                                      const tsl::protobuf::Message& proto,
                                      tsl::Env* env) {
  // Forward WriteTextProto's status straight through.
  auto write_fn =
      [env, &proto](const std::string& temp_filename) -> absl::Status {
    return WriteTextProto(env, temp_filename, proto);
  };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      AtomicallyWrite(filename, env, write_fn),
      "Requested to write proto in text format: ", proto.DebugString());
  return absl::OkStatus();
}
// Atomically writes `tensors` as a TFRecord file with the given compression.
absl::Status AtomicallyWriteTFRecords(absl::string_view filename,
                                      const std::vector<Tensor>& tensors,
                                      absl::string_view compression,
                                      tsl::Env* env) {
  auto nonatomically_write = [&](const std::string& uncommitted_filename) {
    snapshot_util::TFRecordWriter writer(uncommitted_filename,
                                         std::string(compression));
    TF_RETURN_IF_ERROR(writer.Initialize(env));
    TF_RETURN_IF_ERROR(writer.WriteTensors(tensors));
    // Close before the rename commit — presumably flushes buffered records;
    // verify against TFRecordWriter.
    return writer.Close();
  };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      AtomicallyWrite(filename, env, nonatomically_write),
      " Requested to atomically write TF record file: ", filename);
  return absl::OkStatus();
}
// Lists the children of `directory`, filtering out uncommitted temporary
// files. Returns an empty vector when the listing reports NotFound; fails if
// the directory itself does not exist.
absl::StatusOr<std::vector<std::string>> GetChildren(
    absl::string_view directory, tsl::Env* env) {
  std::vector<std::string> files, result;
  TF_RETURN_IF_ERROR(env->FileExists(std::string(directory)));
  absl::Status status = env->GetChildren(std::string(directory), &files);
  if (absl::IsNotFound(status)) {
    // A vanished directory yields an empty listing, not an error.
    return result;
  }
  // Bug fix: previously any non-NotFound error from GetChildren was silently
  // swallowed and a possibly partial listing returned. Propagate it instead.
  TF_RETURN_IF_ERROR(status);
  for (std::string& file : files) {
    if (!IsTemporaryFile(file)) {
      result.push_back(std::move(file));
    }
  }
  return result;
}
// Returns true iff `filename` carries the uncommitted-write temp suffix.
bool IsTemporaryFile(absl::string_view filename) {
  const absl::string_view suffix(kTempFileSuffix);
  if (filename.size() < suffix.size()) {
    return false;
  }
  return filename.substr(filename.size() - suffix.size()) == suffix;
}
// Returns the number of committed chunks of a finished snapshot, or
// kUnknownCardinality when the snapshot is not yet done or its chunk
// directory cannot be listed.
int64_t SnapshotChunksCardinality(absl::string_view snapshot_path,
                                  tsl::Env* env) {
  if (!env->FileExists(SnapshotDoneFilePath(snapshot_path)).ok()) {
    // No DONE marker: the snapshot is still being written (or unreadable).
    return kUnknownCardinality;
  }
  absl::StatusOr<std::vector<std::string>> chunks =
      GetChildren(CommittedChunksDirectory(snapshot_path), env);
  if (!chunks.ok()) {
    return kUnknownCardinality;
  }
  return chunks->size();
}
}
} | #include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
// Creates (recursively) a fresh local temp directory for a test and returns
// its path.
absl::StatusOr<std::string> CreateTestDirectory() {
  std::string directory;
  if (!tsl::Env::Default()->LocalTempFilename(&directory)) {
    return tsl::errors::FailedPrecondition(
        "Failed to create local test directory.");
  }
  TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(directory));
  return directory;
}
// Parameterized over the file contents to also cover the empty string.
using AtomicallyWriteStringToFileTest = ::testing::TestWithParam<std::string>;
TEST_P(AtomicallyWriteStringToFileTest, WriteString) {
  TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
  std::string test_file = tsl::io::JoinPath(directory, "test_file");
  std::string file_contents = GetParam();
  TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, file_contents,
                                           tsl::Env::Default()));
  // Verify the committed file exists and round-trips the payload.
  std::string data;
  TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
  TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), test_file, &data));
  EXPECT_EQ(data, file_contents);
}
INSTANTIATE_TEST_SUITE_P(FileContents, AtomicallyWriteStringToFileTest,
                         ::testing::ValuesIn<std::string>({"OK", ""}));
// Round-trips a proto through the binary atomic writer.
TEST(FileUtilsTest, AtomicallyWriteBinaryProto) {
  TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
  std::string test_file = tsl::io::JoinPath(directory, "test_file");
  DatasetDef out = testing::RangeDataset(10);
  TF_ASSERT_OK(AtomicallyWriteBinaryProto(test_file, out, tsl::Env::Default()));
  DatasetDef in;
  TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
  TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), test_file, &in));
  EXPECT_THAT(in, testing::EqualsProto(out));
}
// Round-trips a proto through the text atomic writer.
TEST(FileUtilsTest, AtomicallyWriteTextProto) {
  TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
  std::string test_file = tsl::io::JoinPath(directory, "test_file");
  DatasetDef out = testing::RangeDataset(10);
  TF_ASSERT_OK(AtomicallyWriteTextProto(test_file, out, tsl::Env::Default()));
  DatasetDef in;
  TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
  TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), test_file, &in));
  EXPECT_THAT(in, testing::EqualsProto(out));
}
// Writes one tensor as a compressed TFRecord and reads it back.
TEST(FileUtilsTest, AtomicallyWriteTFRecord) {
  TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
  std::string test_file = tsl::io::JoinPath(directory, "test_file");
  Tensor out = CreateTensor<int64_t>(TensorShape({2}), {1, 2});
  TF_ASSERT_OK(AtomicallyWriteTFRecords(
      test_file, {out}, tsl::io::compression::kSnappy, tsl::Env::Default()));
  TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
  snapshot_util::TFRecordReaderImpl reader(test_file,
                                           tsl::io::compression::kSnappy);
  TF_ASSERT_OK(reader.Initialize(tsl::Env::Default()));
  TF_ASSERT_OK_AND_ASSIGN(std::vector<Tensor> in, reader.GetTensors());
  EXPECT_EQ(out.DebugString(), in.front().DebugString());
}
// GetChildren must omit *.tmp files left by uncommitted writes.
TEST(FileUtilsTest, GetChildren) {
  TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
  std::string test_file = tsl::io::JoinPath(directory, "test_file");
  TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, "", tsl::Env::Default()));
  std::string tmp_file = tsl::io::JoinPath(directory, "test_file.tmp");
  TF_ASSERT_OK(AtomicallyWriteStringToFile(tmp_file, "", tsl::Env::Default()));
  EXPECT_THAT(GetChildren(directory, tsl::Env::Default()),
              IsOkAndHolds(ElementsAre("test_file")));
}
TEST(FileUtilsTest, GetChildrenEmptyDirectory) {
  TF_ASSERT_OK_AND_ASSIGN(std::string empty_directory, CreateTestDirectory());
  EXPECT_THAT(GetChildren(empty_directory, tsl::Env::Default()),
              IsOkAndHolds(IsEmpty()));
}
// A missing directory is an error (NOT_FOUND), not an empty listing.
TEST(FileUtilsTest, GetChildrenDirectoryNotFound) {
  EXPECT_THAT(GetChildren("Not exist", tsl::Env::Default()),
              StatusIs(tsl::error::NOT_FOUND));
}
TEST(FileUtilsTest, IsTemporaryFile) {
  EXPECT_TRUE(IsTemporaryFile("file.tmp"));
  EXPECT_FALSE(IsTemporaryFile("file"));
  EXPECT_FALSE(IsTemporaryFile(""));
}
}
}
} | absl::Status AtomicallyWriteTFRecords(absl::string_view filename,
                                      const std::vector<Tensor>& tensors,
                                      absl::string_view compression,
                                      tsl::Env* env) {
  // NOTE(review): duplicate of AtomicallyWriteTFRecords above (extraction
  // artifact). Atomically writes `tensors` as a TFRecord file.
  auto nonatomically_write = [&](const std::string& uncommitted_filename) {
    snapshot_util::TFRecordWriter writer(uncommitted_filename,
                                         std::string(compression));
    TF_RETURN_IF_ERROR(writer.Initialize(env));
    TF_RETURN_IF_ERROR(writer.WriteTensors(tensors));
    return writer.Close();
  };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      AtomicallyWrite(filename, env, nonatomically_write),
      " Requested to atomically write TF record file: ", filename);
  return absl::OkStatus();
}
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
Tensor out = CreateTensor<int64_t>(TensorShape({2}), {1, 2});
TF_ASSERT_OK(AtomicallyWriteTFRecords(
test_file, {out}, tsl::io::compression::kSnappy, tsl::Env::Default()));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
snapshot_util::TFRecordReaderImpl reader(test_file,
tsl::io::compression::kSnappy);
TF_ASSERT_OK(reader.Initialize(tsl::Env::Default()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<Tensor> in, reader.GetTensors());
EXPECT_EQ(out.DebugString(), in.front().DebugString());
} |
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
// Complex-to-complex FFT ops. The shape function leaves the input shape
// unchanged and only enforces a minimum rank equal to the transform
// dimensionality (1-D, 2-D, 3-D).
REGISTER_OP("FFT")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
REGISTER_OP("IFFT")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
// 2-D variants: rank must be at least 2.
REGISTER_OP("FFT2D")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 2);
    });
REGISTER_OP("IFFT2D")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 2);
    });
// 3-D variants: rank must be at least 3.
REGISTER_OP("FFT3D")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
    });
REGISTER_OP("IFFT3D")
    .Input("input: Tcomplex")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
    });
// N-D variants take fft_length and axes as extra int32 inputs; the shape
// function only enforces rank >= 1 here.
REGISTER_OP("FFTND")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Input("axes: int32")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
REGISTER_OP("IFFTND")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Input("axes: int32")
    .Output("output: Tcomplex")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
// Shape function shared by the RFFT*/IRFFT* ops. `forward` selects RFFT
// (real -> complex, onesided) vs IRFFT (complex -> real); `rank` is the
// transform dimensionality. The innermost `rank` output dims come from the
// fft_length input; for a forward transform the last of them becomes
// fft_length/2 + 1, unless that fft_length entry is 0.
Status RFFTShape(InferenceContext* c, const bool forward, const int rank) {
  ShapeHandle out;
  TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), rank, &out));
  ShapeHandle unused_shape;
  DimensionHandle unused_dim;
  // fft_length must be a rank-1 tensor with exactly `rank` elements.
  ShapeHandle fft_length_input = c->input(1);
  TF_RETURN_IF_ERROR(c->WithRank(fft_length_input, 1, &unused_shape));
  TF_RETURN_IF_ERROR(
      c->WithValue(c->Dim(fft_length_input, 0), rank, &unused_dim));
  const Tensor* fft_length_tensor = c->input_tensor(1);
  if (fft_length_tensor == nullptr) {
    // fft_length is not a compile-time constant: innermost dims are unknown.
    for (int i = 0; i < rank; ++i) {
      TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->UnknownDim(), &out));
    }
  } else {
    auto fft_length_as_vec = fft_length_tensor->vec<int32>();
    for (int i = 0; i < rank; ++i) {
      // Forward real FFT halves (+1) the last transformed dimension.
      auto dim = forward && i == rank - 1 && fft_length_as_vec(i) != 0
                     ? fft_length_as_vec(i) / 2 + 1
                     : fft_length_as_vec(i);
      TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->MakeDim(dim), &out));
    }
  }
  c->set_output(0, out);
  return absl::OkStatus();
}
// Real-input (RFFT) and real-output (IRFFT) transforms; shapes come from
// RFFTShape above with forward = true/false and rank 1/2/3.
REGISTER_OP("RFFT")
    .Input("input: Treal")
    .Input("fft_length: int32")
    .Output("output: Tcomplex")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 1); });
REGISTER_OP("IRFFT")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Output("output: Treal")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 1); });
REGISTER_OP("RFFT2D")
    .Input("input: Treal")
    .Input("fft_length: int32")
    .Output("output: Tcomplex")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 2); });
REGISTER_OP("IRFFT2D")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Output("output: Treal")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 2); });
REGISTER_OP("RFFT3D")
    .Input("input: Treal")
    .Input("fft_length: int32")
    .Output("output: Tcomplex")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 3); });
REGISTER_OP("IRFFT3D")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Output("output: Treal")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 3); });
// N-D real transforms only enforce rank >= 1 (axes/fft_length are inputs).
REGISTER_OP("RFFTND")
    .Input("input: Treal")
    .Input("fft_length: int32")
    .Input("axes: int32")
    .Output("output: Tcomplex")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
REGISTER_OP("IRFFTND")
    .Input("input: Tcomplex")
    .Input("fft_length: int32")
    .Input("axes: int32")
    .Output("output: Treal")
    .Attr("Treal: {float32, float64} = DT_FLOAT")
    .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
    });
// Legacy Batch* aliases, deprecated since GraphDef version 15 in favor of
// the batched FFT/IFFT ops above. Shape inference is intentionally unknown.
REGISTER_OP("BatchFFT")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use FFT");
REGISTER_OP("BatchIFFT")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use IFFT");
REGISTER_OP("BatchFFT2D")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use FFT2D");
REGISTER_OP("BatchIFFT2D")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use IFFT2D");
REGISTER_OP("BatchFFT3D")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use FFT3D");
REGISTER_OP("BatchIFFT3D")
    .Input("input: complex64")
    .Output("output: complex64")
    .SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(15, "Use IFFT3D");
} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
// Checks that the complex FFT shape functions pass shapes through unchanged
// while enforcing a minimum rank of 1/2/3.
TEST(MathOpsTest, FFT_ShapeFn) {
  for (const auto* op_name : {"FFT", "IFFT"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
    INFER_OK(op, "[?]", "in0");
    INFER_OK(op, "[1]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
  for (const auto* op_name : {"FFT2D", "IFFT2D"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
    INFER_OK(op, "[?,1]", "in0");
    INFER_OK(op, "[1,2]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
  for (const auto* op_name : {"FFT3D", "IFFT3D"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
    INFER_OK(op, "[?,1,?]", "in0");
    INFER_OK(op, "[1,2,3]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
}
// Checks RFFT/IRFFT shape inference for ranks 1-3. Without a constant
// fft_length the innermost dims are unknown; with a constant, forward ops get
// fft_length/2 + 1 on the last dim (onesided), inverse ops get fft_length.
TEST(MathOpsTest, RFFT_ShapeFn) {
  for (const bool forward : {true, false}) {
    ShapeInferenceTestOp op(forward ? "RFFT" : "IRFFT");
    INFER_OK(op, "?;?", "?");
    INFER_OK(op, "?;[1]", "?");
    INFER_OK(op, "[1];?", "[?]");
    INFER_OK(op, "[1];[1]", "[?]");
    INFER_OK(op, "[?];[1]", "[?]");
    INFER_OK(op, "[1,2,3,4];[1]", "[d0_0,d0_1,d0_2,?]");
    INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];?");
    INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1];[1,1]");
    INFER_ERROR("Dimension must be 1 but is 2", op, "[1];[2]");
    // Constant fft_length: 10 -> forward 10/2+1 = 6.
    op.input_tensors.resize(2);
    Tensor fft_length = test::AsTensor<int32>({10});
    op.input_tensors[1] = &fft_length;
    if (forward) {
      INFER_OK(op, "[?];[1]", "[6]");
      INFER_OK(op, "[1];[1]", "[6]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,6]");
    } else {
      INFER_OK(op, "[?];[1]", "[10]");
      INFER_OK(op, "[1];[1]", "[10]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,10]");
    }
    // 11 -> forward 11/2+1 = 6 (integer division).
    fft_length = test::AsTensor<int32>({11});
    if (forward) {
      INFER_OK(op, "[?];[1]", "[6]");
      INFER_OK(op, "[1];[1]", "[6]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,6]");
    } else {
      INFER_OK(op, "[?];[1]", "[11]");
      INFER_OK(op, "[1];[1]", "[11]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,11]");
    }
    // 12 -> forward 12/2+1 = 7.
    fft_length = test::AsTensor<int32>({12});
    if (forward) {
      INFER_OK(op, "[?];[1]", "[7]");
      INFER_OK(op, "[1];[1]", "[7]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,7]");
    } else {
      INFER_OK(op, "[?];[1]", "[12]");
      INFER_OK(op, "[1];[1]", "[12]");
      INFER_OK(op, "[1,1];[1]", "[d0_0,12]");
    }
  }
  for (const bool forward : {true, false}) {
    ShapeInferenceTestOp op(forward ? "RFFT2D" : "IRFFT2D");
    INFER_OK(op, "?;?", "?");
    INFER_OK(op, "?;[2]", "?");
    INFER_OK(op, "[1,1];?", "[?,?]");
    INFER_OK(op, "[1,1];[2]", "[?,?]");
    INFER_OK(op, "[?,?];[2]", "[?,?]");
    INFER_OK(op, "[1,2,3,4];[2]", "[d0_0,d0_1,?,?]");
    INFER_ERROR("Shape must be at least rank 2 but is rank 0", op, "[];?");
    INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1];[1,1]");
    INFER_ERROR("Dimension must be 2 but is 3", op, "[1,1];[3]");
    // Only the innermost dim is halved on forward transforms.
    op.input_tensors.resize(2);
    Tensor fft_length = test::AsTensor<int32>({9, 10});
    op.input_tensors[1] = &fft_length;
    if (forward) {
      INFER_OK(op, "[?,?];[2]", "[9,6]");
      INFER_OK(op, "[1,1];[2]", "[9,6]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,6]");
    } else {
      INFER_OK(op, "[?,?];[2]", "[9,10]");
      INFER_OK(op, "[1,1];[2]", "[9,10]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,10]");
    }
    fft_length = test::AsTensor<int32>({10, 11});
    if (forward) {
      INFER_OK(op, "[?,?];[2]", "[10,6]");
      INFER_OK(op, "[1,1];[2]", "[10,6]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,6]");
    } else {
      INFER_OK(op, "[?,?];[2]", "[10,11]");
      INFER_OK(op, "[1,1];[2]", "[10,11]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,11]");
    }
    fft_length = test::AsTensor<int32>({11, 12});
    if (forward) {
      INFER_OK(op, "[?,?];[2]", "[11,7]");
      INFER_OK(op, "[1,1];[2]", "[11,7]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,7]");
    } else {
      INFER_OK(op, "[?,?];[2]", "[11,12]");
      INFER_OK(op, "[1,1];[2]", "[11,12]");
      INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,12]");
    }
  }
  for (const bool forward : {true, false}) {
    ShapeInferenceTestOp op(forward ? "RFFT3D" : "IRFFT3D");
    INFER_OK(op, "?;?", "?");
    INFER_OK(op, "?;[3]", "?");
    INFER_OK(op, "[1,1,1];?", "[?,?,?]");
    INFER_OK(op, "[1,1,1];[3]", "[?,?,?]");
    INFER_OK(op, "[?,?,?];[3]", "[?,?,?]");
    INFER_OK(op, "[1,2,3,4];[3]", "[d0_0,?,?,?]");
    INFER_ERROR("Shape must be at least rank 3 but is rank 0", op, "[];?");
    INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1,1];[1,1]");
    INFER_ERROR("Dimension must be 3 but is 4", op, "[1,1,1];[4]");
    op.input_tensors.resize(2);
    Tensor fft_length = test::AsTensor<int32>({10, 11, 12});
    op.input_tensors[1] = &fft_length;
    if (forward) {
      INFER_OK(op, "[?,?,?];[3]", "[10,11,7]");
      INFER_OK(op, "[1,1,1];[3]", "[10,11,7]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,7]");
    } else {
      INFER_OK(op, "[?,?,?];[3]", "[10,11,12]");
      INFER_OK(op, "[1,1,1];[3]", "[10,11,12]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,12]");
    }
    fft_length = test::AsTensor<int32>({11, 12, 13});
    if (forward) {
      INFER_OK(op, "[?,?,?];[3]", "[11,12,7]");
      INFER_OK(op, "[1,1,1];[3]", "[11,12,7]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,7]");
    } else {
      INFER_OK(op, "[?,?,?];[3]", "[11,12,13]");
      INFER_OK(op, "[1,1,1];[3]", "[11,12,13]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,13]");
    }
    fft_length = test::AsTensor<int32>({12, 13, 14});
    if (forward) {
      INFER_OK(op, "[?,?,?];[3]", "[12,13,8]");
      INFER_OK(op, "[1,1,1];[3]", "[12,13,8]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,8]");
    } else {
      INFER_OK(op, "[?,?,?];[3]", "[12,13,14]");
      INFER_OK(op, "[1,1,1];[3]", "[12,13,14]");
      INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,14]");
    }
  }
}
} | REGISTER_OP("FFT2D")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
// NOTE(review): duplicate of TEST(MathOpsTest, FFT_ShapeFn) above
// (extraction artifact); verifies pass-through shapes with minimum ranks.
TEST(MathOpsTest, FFT_ShapeFn) {
  for (const auto* op_name : {"FFT", "IFFT"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
    INFER_OK(op, "[?]", "in0");
    INFER_OK(op, "[1]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
  for (const auto* op_name : {"FFT2D", "IFFT2D"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
    INFER_OK(op, "[?,1]", "in0");
    INFER_OK(op, "[1,2]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
  for (const auto* op_name : {"FFT3D", "IFFT3D"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?", "in0");
    INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
    INFER_OK(op, "[?,1,?]", "in0");
    INFER_OK(op, "[1,2,3]", "in0");
    INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
  }
}
#include "absl/base/internal/throw_delegate.h"
#include <cstdlib>
#include <functional>
#include <new>
#include <stdexcept>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Each ThrowStdX helper throws the corresponding std exception when
// ABSL_HAVE_EXCEPTIONS is defined; otherwise it logs the message fatally and
// calls std::abort() (the abort also keeps the function effectively noreturn
// if the fatal log were ever to return). A const char* overload exists so
// callers with a literal avoid constructing a std::string.
void ThrowStdLogicError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::logic_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdLogicError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::logic_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::invalid_argument(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::invalid_argument(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdDomainError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::domain_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdDomainError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::domain_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// length_error / out_of_range / runtime_error delegates; same pattern as the
// helpers above: throw with exceptions enabled, otherwise fatal-log + abort.
void ThrowStdLengthError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::length_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdLengthError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::length_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::out_of_range(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdOutOfRange(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::out_of_range(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdRuntimeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::runtime_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdRuntimeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::runtime_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// Throws std::range_error(what_arg) when exceptions are enabled; otherwise
// logs the message at FATAL severity and aborts the process.
void ThrowStdRangeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::range_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();  // Guarantees no return even if the FATAL log does not exit.
#endif
}
// C-string overload; behavior matches the std::string overload above.
void ThrowStdRangeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::range_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// Throws std::overflow_error(what_arg) when exceptions are enabled; otherwise
// logs the message at FATAL severity and aborts the process.
void ThrowStdOverflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::overflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();  // Guarantees no return even if the FATAL log does not exit.
#endif
}
// C-string overload; behavior matches the std::string overload above.
void ThrowStdOverflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::overflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// Throws std::underflow_error(what_arg) when exceptions are enabled;
// otherwise logs the message at FATAL severity and aborts the process.
void ThrowStdUnderflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::underflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();  // Guarantees no return even if the FATAL log does not exit.
#endif
}
// C-string overload; behavior matches the std::string overload above.
void ThrowStdUnderflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::underflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// Throws std::bad_function_call when exceptions are enabled; otherwise
// aborts.  No message is attached (the exception type carries none).
void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::bad_function_call();
#else
  std::abort();
#endif
}
// Throws std::bad_alloc when exceptions are enabled; otherwise aborts.
void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::bad_alloc();
#else
  std::abort();
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/throw_delegate.h"
#include <functional>
#include <new>
#include <stdexcept>
#include "absl/base/config.h"
#include "gtest/gtest.h"
namespace {
using absl::base_internal::ThrowStdLogicError;
using absl::base_internal::ThrowStdInvalidArgument;
using absl::base_internal::ThrowStdDomainError;
using absl::base_internal::ThrowStdLengthError;
using absl::base_internal::ThrowStdOutOfRange;
using absl::base_internal::ThrowStdRuntimeError;
using absl::base_internal::ThrowStdRangeError;
using absl::base_internal::ThrowStdOverflowError;
using absl::base_internal::ThrowStdUnderflowError;
using absl::base_internal::ThrowStdBadFunctionCall;
using absl::base_internal::ThrowStdBadAlloc;
// Message used by every test below; distinctive enough to match reliably in
// death-test output.
constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
// Invokes `f` with `what_arg` and verifies it throws an E whose what() equals
// the message; when exceptions are disabled, expects process death instead.
template <typename E>
void ExpectThrowChar(void (*f)(const char*)) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f(what_arg);
    FAIL() << "Didn't throw";
  } catch (const E& e) {
    EXPECT_STREQ(e.what(), what_arg);
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
#endif
}
// Same as ExpectThrowChar, but exercises the std::string overloads.
template <typename E>
void ExpectThrowString(void (*f)(const std::string&)) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f(what_arg);
    FAIL() << "Didn't throw";
  } catch (const E& e) {
    EXPECT_STREQ(e.what(), what_arg);
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
#endif
}
// For delegates whose exception carries no caller-supplied message.
template <typename E>
void ExpectThrowNoWhat(void (*f)()) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f();
    FAIL() << "Didn't throw";
  } catch (const E& e) {
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(), "");
#endif
}
// Each *Char test verifies that the delegate throws the expected exception
// type carrying `what_arg` (or terminates when exceptions are disabled).
TEST(ThrowDelegate, ThrowStdLogicErrorChar) {
  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
}
TEST(ThrowDelegate, ThrowStdInvalidArgumentChar) {
  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
}
TEST(ThrowDelegate, ThrowStdDomainErrorChar) {
  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
}
TEST(ThrowDelegate, ThrowStdLengthErrorChar) {
  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
}
TEST(ThrowDelegate, ThrowStdOutOfRangeChar) {
  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
}
TEST(ThrowDelegate, ThrowStdRuntimeErrorChar) {
  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
}
TEST(ThrowDelegate, ThrowStdRangeErrorChar) {
  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
}
TEST(ThrowDelegate, ThrowStdOverflowErrorChar) {
  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
}
TEST(ThrowDelegate, ThrowStdUnderflowErrorChar) {
  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
}
// Each *String test exercises the std::string overload of the same delegate.
TEST(ThrowDelegate, ThrowStdLogicErrorString) {
  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
}
TEST(ThrowDelegate, ThrowStdInvalidArgumentString) {
  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
}
TEST(ThrowDelegate, ThrowStdDomainErrorString) {
  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
}
TEST(ThrowDelegate, ThrowStdLengthErrorString) {
  ExpectThrowString<std::length_error>(ThrowStdLengthError);
}
TEST(ThrowDelegate, ThrowStdOutOfRangeString) {
  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
}
TEST(ThrowDelegate, ThrowStdRuntimeErrorString) {
  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
}
TEST(ThrowDelegate, ThrowStdRangeErrorString) {
  ExpectThrowString<std::range_error>(ThrowStdRangeError);
}
TEST(ThrowDelegate, ThrowStdOverflowErrorString) {
  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
}
TEST(ThrowDelegate, ThrowStdUnderflowErrorString) {
  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
}
// bad_function_call is tested inline rather than via ExpectThrowNoWhat
// because libc++ may throw a different concrete type derived from
// std::exception; the extra catch below accepts that.
TEST(ThrowDelegate, ThrowStdBadFunctionCallNoWhat) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    ThrowStdBadFunctionCall();
    FAIL() << "Didn't throw";
  } catch (const std::bad_function_call&) {
  }
#ifdef _LIBCPP_VERSION
  catch (const std::exception&) {
  }
#endif
#else
  EXPECT_DEATH_IF_SUPPORTED(ThrowStdBadFunctionCall(), "");
#endif
}
TEST(ThrowDelegate, ThrowStdBadAllocNoWhat) {
  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
}
} | void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
throw std::out_of_range(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} | TEST(ThrowDelegate, ThrowStdOutOfRangeChar) {
ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
}
TEST(ThrowDelegate, ThrowStdOutOfRangeString) {
ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
} |
#include "tensorstore/index_space/internal/dimension_selection.h"
#include <numeric>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
// Validates `dimensions` against `input_rank` and rewrites each entry in
// place to its normalized form.  Fails if there are more dimensions than the
// rank allows, if any entry cannot be normalized, or if an entry appears more
// than once after normalization.
absl::Status CheckAndNormalizeDimensions(DimensionIndex input_rank,
                                         span<DimensionIndex> dimensions) {
  if (dimensions.size() > input_rank) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Number of dimensions (", dimensions.size(),
                            ") exceeds input rank (", input_rank, ")."));
  }
  // Every index that collides with an earlier entry is recorded here so the
  // error message can list all offenders at once.
  std::vector<DimensionIndex> duplicates;
  for (DimensionIndex cur = 0; cur < dimensions.size(); ++cur) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        const DimensionIndex normalized,
        NormalizeDimensionIndex(dimensions[cur], input_rank));
    dimensions[cur] = normalized;
    for (DimensionIndex prev = 0; prev < cur; ++prev) {
      if (dimensions[prev] == normalized) duplicates.push_back(normalized);
    }
  }
  if (duplicates.empty()) return absl::OkStatus();
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "Input dimensions {", absl::StrJoin(duplicates, ", "),
      "} specified more than once"));
}
// Copies `dimensions` into `*result`, then validates and normalizes them
// against `input_rank` via CheckAndNormalizeDimensions.
absl::Status GetDimensions(DimensionIndex input_rank,
                           span<const DimensionIndex> dimensions,
                           DimensionIndexBuffer* result) {
  result->assign(dimensions.begin(), dimensions.end());
  return CheckAndNormalizeDimensions(input_rank, *result);
}
// Convenience overload: selects dimensions of `transform`'s input space.
absl::Status GetDimensions(IndexTransformView<> transform,
                           span<const DimensionIndex> dimensions,
                           DimensionIndexBuffer* result) {
  return GetDimensions(transform.input_rank(), dimensions, result);
}
// Resolves a mixed list of dimension identifiers (numeric indices or labels)
// against `transform`'s input space, storing the resulting indices in
// `*result`, then validates/normalizes the combined selection.
absl::Status GetDimensions(IndexTransformView<> transform,
                           span<const DimensionIdentifier> dimensions,
                           DimensionIndexBuffer* result) {
  const DimensionIndex input_rank = transform.input_rank();
  result->resize(dimensions.size());
  span<const std::string> input_labels = transform.input_labels();
  for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
    // Each identifier is resolved using the transform's input labels.
    TENSORSTORE_ASSIGN_OR_RETURN(
        (*result)[i],
        NormalizeDimensionIdentifier(dimensions[i], input_labels));
  }
  return CheckAndNormalizeDimensions(input_rank, *result);
}
// Normalizes indices of dimensions being *added*: they are interpreted
// relative to the rank after insertion, i.e. input_rank + dimensions.size().
absl::Status GetNewDimensions(DimensionIndex input_rank,
                              span<const DimensionIndex> dimensions,
                              DimensionIndexBuffer* result) {
  return GetDimensions(input_rank + dimensions.size(), dimensions, result);
}
// Fills `*result` with the identity selection [0, input_rank).
absl::Status GetAllDimensions(DimensionIndex input_rank,
                              DimensionIndexBuffer* result) {
  result->resize(input_rank);
  DimensionIndex next = 0;
  for (auto& entry : *result) entry = next++;
  return absl::OkStatus();
}
// Resolves a list of dynamic dimension specs (indices, labels, or ranges)
// against `labels`, appending the matches to `*result`, then validates the
// combined selection against the rank implied by labels.size().
absl::Status GetDimensions(span<const std::string> labels,
                           span<const DynamicDimSpec> dimensions,
                           DimensionIndexBuffer* result) {
  result->clear();
  TENSORSTORE_RETURN_IF_ERROR(
      NormalizeDynamicDimSpecs(dimensions, labels, result));
  return CheckAndNormalizeDimensions(labels.size(), *result);
}
namespace {
// Computes how many new dimensions a DimRangeSpec (`start:stop:step`) adds.
// The final rank is not yet known at this point, so the count is only well
// defined when it does not depend on that rank; otherwise an error is
// returned.
Result<DimensionIndex> GetNumNewDimensions(const DimRangeSpec& spec) {
  const DimensionIndex step = spec.step;
  if (step == 0) return absl::InvalidArgumentError("step must not be 0");
  if (spec.inclusive_start) {
    const DimensionIndex inclusive_start = *spec.inclusive_start;
    if (spec.exclusive_stop) {
      const DimensionIndex exclusive_stop = *spec.exclusive_stop;
      // Both bounds given: rank-independent only when they share a sign, and
      // the interval must be non-empty in the direction of `step`.
      if ((exclusive_stop < 0) == (inclusive_start < 0) &&
          ((step > 0 && exclusive_stop >= inclusive_start) ||
           (step < 0 && exclusive_stop <= inclusive_start))) {
        return CeilOfRatio(*spec.exclusive_stop - inclusive_start, step);
      }
    } else if (step > 0) {
      // `start::step` runs to the end; the count is rank-independent only
      // when `start` is negative (i.e. end-relative).
      if (inclusive_start < 0) {
        return CeilOfRatio(-inclusive_start, step);
      }
    } else {
      // `start::-step` runs down to index 0; requires a non-negative
      // (front-relative) start.
      if (inclusive_start >= 0) {
        return CeilOfRatio(inclusive_start + 1, -step);
      }
    }
  } else if (spec.exclusive_stop) {
    const DimensionIndex exclusive_stop = *spec.exclusive_stop;
    if (step > 0) {
      // `:stop:step` starts at index 0; requires a non-negative stop.
      if (exclusive_stop >= 0) {
        return CeilOfRatio(exclusive_stop, step);
      }
    } else {
      // `:stop:-step` starts at the last dimension; requires a negative stop.
      if (exclusive_stop < 0) {
        return CeilOfRatio(-(exclusive_stop + 1), -step);
      }
    }
  }
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "`", spec, "` is not a valid specification for new dimensions"));
}
}
// Resolves specs for dimensions being added to an index space of rank
// `input_rank`.  Works in two passes: first computes the final rank (input
// rank plus the number of new dimensions), then normalizes every spec
// against that final rank.
absl::Status GetNewDimensions(DimensionIndex input_rank,
                              span<const DynamicDimSpec> dimensions,
                              DimensionIndexBuffer* result) {
  // Pass 1: determine the rank after all new dimensions are inserted.
  DimensionIndex new_rank = input_rank;
  for (const auto& spec : dimensions) {
    if (auto* r = std::get_if<DimRangeSpec>(&spec)) {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex x, GetNumNewDimensions(*r));
      new_rank += x;
    } else {
      new_rank += 1;
    }
  }
  result->clear();
  result->reserve(new_rank);
  // Pass 2: resolve each spec against the final rank.
  struct Visitor {
    DimensionIndex new_rank;
    DimensionIndexBuffer* result;
    absl::Status operator()(DimensionIndex i) const {
      TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
                                   NormalizeDimensionIndex(i, new_rank));
      result->push_back(index);
      return absl::OkStatus();
    }
    absl::Status operator()(const std::string& label) const {
      // A label cannot refer to a dimension that does not exist yet.
      return absl::InvalidArgumentError(
          "New dimensions cannot be specified by label");
    }
    absl::Status operator()(const DimRangeSpec& s) const {
      return NormalizeDimRangeSpec(s, new_rank, result);
    }
  };
  for (const auto& spec : dimensions) {
    TENSORSTORE_RETURN_IF_ERROR(std::visit(Visitor{new_rank, result}, spec));
  }
  return CheckAndNormalizeDimensions(new_rank, *result);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::DynamicDims;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpressionError;
// Invalid selections: too many dimensions, out-of-range index, and a
// duplicated index must each produce kInvalidArgument with a clear message.
TEST(DimsTest, ErrorHandling) {
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().Finalize().value(),
      Dims(span<const DimensionIndex>({0, 0, 1})).IndexSlice(0),
      absl::StatusCode::kInvalidArgument,
      "Number of dimensions .* exceeds input rank .*");
  TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
                         Dims(2).Label("b"), absl::StatusCode::kInvalidArgument,
                         "Dimension index 2 is outside valid range .*");
  TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
                         Dims(1, 1).Label("b", "c"),
                         absl::StatusCode::kInvalidArgument,
                         "Input dimensions \\{1\\} specified more than once.*");
}
// Selection by label (and mixed label/negative-index), plus error cases for
// an unknown label and an empty label.
TEST(DimsTest, SelectUsingLabels) {
  TestDimExpression(
      IndexTransformBuilder<2, 0>()
          .input_labels({"x", "y"})
          .Finalize()
          .value(),
      Dims("x").Label("a"),
      {0},
      IndexTransformBuilder<2, 2>()
          .input_labels({"a", "y"})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 0>().input_labels({"a", "y"}).Finalize().value(),
      {});
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().input_labels({"x", "y"}).Finalize().value(),
      Dims("a").Label("z"), absl::StatusCode::kInvalidArgument,
      "Label \"a\" does not match one of \\{\"x\", \"y\"\\}");
  TestDimExpressionError(
      IndexTransformBuilder<2, 0>().input_labels({"", ""}).Finalize().value(),
      Dims("").Label("z"), absl::StatusCode::kInvalidArgument,
      "Dimension cannot be specified by empty label");
  TestDimExpression(
      IndexTransformBuilder<2, 0>()
          .input_labels({"x", "y"})
          .Finalize()
          .value(),
      Dims({"x", -1}).Label("a", "b"),
      {0, 1},
      IndexTransformBuilder<2, 2>()
          .input_labels({"a", "b"})
          .output_identity_transform()
          .Finalize()
          .value(),
      IndexTransformBuilder<2, 0>().input_labels({"a", "b"}).Finalize().value(),
      {});
}
// Mixed range/index/label specs selecting *existing* dimensions, via both the
// variadic Dims(...) form and the DynamicDims({...}) form.
TEST(DynamicDimsTest, Existing) {
  const auto original_transform = IndexTransformBuilder<4, 0>()
                                      .input_labels({"a", "b", "c", "d"})
                                      .Finalize()
                                      .value();
  const auto expected_identity_new_transform =
      IndexTransformBuilder<4, 4>()
          .input_labels({"a1", "b1", "c1", "d1"})
          .output_identity_transform()
          .Finalize()
          .value();
  const auto expected_new_transform =
      IndexTransformBuilder<4, 0>()
          .input_labels({"a1", "b1", "c1", "d1"})
          .Finalize()
          .value();
  TestDimExpression(
      original_transform,
      Dims(DimRangeSpec{1, 4, 2}, 0, "c").Label("b1", "d1", "a1", "c1"),
      {1, 3, 0, 2},
      expected_identity_new_transform,
      expected_new_transform,
      {});
  TestDimExpression(
      original_transform,
      DynamicDims({DimRangeSpec{1, 4, 2}, 0, "c"})
          .Label("b1", "d1", "a1", "c1"),
      {1, 3, 0, 2},
      expected_identity_new_transform,
      expected_new_transform,
      {});
}
// Range plus explicit indices used with AddNew: new dimensions are resolved
// against the post-insertion rank (8 here).
TEST(DynamicDimsTest, CombinedNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 2}, 0, -1).AddNew().Label("e", "f", "g", "h"),
      {1, 3, 0, 7},
      IndexTransformBuilder<dynamic_rank, 4>(8, tensorstore::StaticRank<4>{})
          .input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
          .output_single_input_dimension(0, 2)
          .output_single_input_dimension(1, 4)
          .output_single_input_dimension(2, 5)
          .output_single_input_dimension(3, 6)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(8)
          .input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
          .Finalize()
          .value(),
      {},
      false);
}
// The tests below exercise every rejection path of GetNumNewDimensions /
// GetNewDimensions: labels for new dims, rank-dependent ranges, empty or
// sign-mixed intervals, and a zero step.
TEST(DynamicDimsTest, InvalidNewLabel) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 2}, "x").AddNew(),
      absl::StatusCode::kInvalidArgument,
      "New dimensions cannot be specified by label");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewUnbounded) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, std::nullopt, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStop) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{5, std::nullopt, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`5:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStop) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, -3, 1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:-3` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStartNegativeStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-5, std::nullopt, -1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`-5::-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStart) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, 5, -1}).AddNew(),
      absl::StatusCode::kInvalidArgument,
      "`:5:-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidInterval) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{6, 5, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`6:5` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidMixedSigns) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-1, 4, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`-1:4` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewZeroStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, 4, 0}).AddNew(), absl::StatusCode::kInvalidArgument,
      "step must not be 0");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidIntervalNegativeStep) {
  TestDimExpressionError(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{5, 6, -1}).AddNew(), absl::StatusCode::kInvalidArgument,
      "`5:6:-1` is not a valid specification for new dimensions");
}
// The tests below cover the valid, rank-independent DimRangeSpec forms for
// AddNew: negative bounds, implicit start/stop, and positive/negative steps.
TEST(DimsTest, DimRangeSpecNegativeStep) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-4, -7, -2}).AddNew().Label("e", "f"),
      {2, 0},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"f", "a", "e", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"f", "a", "e", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecNegativeIndicesNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-6, -3, 2}).AddNew().Label("e", "f"),
      {0, 2},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{-3, std::nullopt, 2}).AddNew().Label("e", "f"),
      {3, 5},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"a", "b", "c", "e", "d", "f"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 2)
          .output_single_input_dimension(3, 4)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"a", "b", "c", "e", "d", "f"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNegativeStepNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{1, std::nullopt, -1}).AddNew().Label("e", "f"),
      {1, 0},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"f", "e", "a", "b", "c", "d"})
          .output_single_input_dimension(0, 2)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"f", "e", "a", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNegativeStepNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, -4, -2}).AddNew().Label("e", "f"),
      {5, 3},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"a", "b", "c", "f", "d", "e"})
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 1)
          .output_single_input_dimension(2, 2)
          .output_single_input_dimension(3, 4)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"a", "b", "c", "f", "d", "e"})
          .Finalize()
          .value(),
      {},
      false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNew) {
  TestDimExpression(
      IndexTransformBuilder<4, 0>()
          .input_labels({"a", "b", "c", "d"})
          .Finalize()
          .value(),
      Dims(DimRangeSpec{std::nullopt, 3, 2}).AddNew().Label("e", "f"),
      {0, 2},
      IndexTransformBuilder<dynamic_rank, 4>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 3)
          .output_single_input_dimension(2, 4)
          .output_single_input_dimension(3, 5)
          .Finalize()
          .value(),
      IndexTransformBuilder<dynamic_rank, 0>(6)
          .input_labels({"e", "a", "f", "b", "c", "d"})
          .Finalize()
          .value(),
      {},
      false);
}
// Resolve fills a DimensionIndexBuffer without applying any operation.
TEST(ResolveTest, Example) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto domain, IndexDomainBuilder<3>().labels({"x", "y", "z"}).Finalize());
  DimensionIndexBuffer buffer;
  TENSORSTORE_EXPECT_OK(Dims("x", "z").Resolve(domain, &buffer));
  EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2));
}
} | namespace {
Result<DimensionIndex> GetNumNewDimensions(const DimRangeSpec& spec) {
const DimensionIndex step = spec.step;
if (step == 0) return absl::InvalidArgumentError("step must not be 0");
if (spec.inclusive_start) {
const DimensionIndex inclusive_start = *spec.inclusive_start;
if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if ((exclusive_stop < 0) == (inclusive_start < 0) &&
((step > 0 && exclusive_stop >= inclusive_start) ||
(step < 0 && exclusive_stop <= inclusive_start))) {
return CeilOfRatio(*spec.exclusive_stop - inclusive_start, step);
}
} else if (step > 0) {
if (inclusive_start < 0) {
return CeilOfRatio(-inclusive_start, step);
}
} else {
if (inclusive_start >= 0) {
return CeilOfRatio(inclusive_start + 1, -step);
}
}
} else if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if (step > 0) {
if (exclusive_stop >= 0) {
return CeilOfRatio(exclusive_stop, step);
}
} else {
if (exclusive_stop < 0) {
return CeilOfRatio(-(exclusive_stop + 1), -step);
}
}
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"`", spec, "` is not a valid specification for new dimensions"));
} | TEST(DimsTest, DimRangeSpecNegativeStep) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-4, -7, -2}).AddNew().Label("e", "f"),
{2, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecNegativeIndicesNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-6, -3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-3, std::nullopt, 2}).AddNew().Label("e", "f"),
{3, 5},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, std::nullopt, -1}).AddNew().Label("e", "f"),
{1, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, -4, -2}).AddNew().Label("e", "f"),
{5, 3},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, 3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
} |
#include "tsl/profiler/utils/buffer_pool.h"
#include <ios>
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
namespace tsl {
namespace profiler {
// Constructs a pool that hands out buffers of `buffer_size_in_bytes` each.
BufferPool::BufferPool(size_t buffer_size_in_bytes)
    : buffer_size_in_bytes_(buffer_size_in_bytes) {}
// Frees all buffers currently held by the pool.  Buffers handed out but not
// yet reclaimed are not tracked here, so they are not freed.
BufferPool::~BufferPool() { DestroyAllBuffers(); }
// Returns a buffer of GetBufferSizeInBytes() bytes, preferring a previously
// reclaimed buffer over a fresh allocation.  Returns nullptr on allocation
// failure, or if a null pointer was erroneously reclaimed into the pool.
uint8_t* BufferPool::GetOrCreateBuffer() {
  // Fast path: hand back the most recently reclaimed buffer, if any.
  {
    mutex_lock lock(buffers_mutex_);
    if (!buffers_.empty()) {
      uint8_t* recycled = buffers_.back();
      buffers_.pop_back();
      if (recycled == nullptr) {
        LOG(ERROR) << "A reused buffer must not be null!";
        return nullptr;
      }
      VLOG(3) << "Reused Buffer, buffer=" << std::hex
              << reinterpret_cast<uintptr_t>(recycled) << std::dec;
      return recycled;
    }
  }
  // Slow path: allocate a new aligned buffer outside the lock.
  constexpr size_t kBufferAlignSize = 8;
  uint8_t* fresh = reinterpret_cast<uint8_t*>(
      port::AlignedMalloc(buffer_size_in_bytes_, kBufferAlignSize));
  if (fresh == nullptr) {
    LOG(WARNING) << "Buffer not allocated.";
    return nullptr;
  }
  VLOG(3) << "Allocated Buffer, buffer=" << std::hex
          << reinterpret_cast<uintptr_t>(fresh) << std::dec
          << " size=" << buffer_size_in_bytes_;
  return fresh;
}
// Returns `buffer` to the pool for reuse by a later GetOrCreateBuffer() call.
// The pointer is stored as-is; a null pointer is not rejected here but is
// detected when GetOrCreateBuffer() hands it out again.
void BufferPool::ReclaimBuffer(uint8_t* buffer) {
  mutex_lock lock(buffers_mutex_);
  buffers_.push_back(buffer);
  VLOG(3) << "Reclaimed Buffer, buffer=" << std::hex
          << reinterpret_cast<uintptr_t>(buffer) << std::dec;
}
// Frees every buffer currently held by the pool and empties it.  Buffers
// that are checked out (not yet reclaimed) are unaffected.
void BufferPool::DestroyAllBuffers() {
  mutex_lock lock(buffers_mutex_);
  for (uint8_t* buffer : buffers_) {
    VLOG(3) << "Freeing Buffer, buffer:" << std::hex
            << reinterpret_cast<uintptr_t>(buffer) << std::dec;
    port::AlignedFree(buffer);
  }
  buffers_.clear();
}
// Returns the fixed size, in bytes, of every buffer managed by this pool.
size_t BufferPool::GetBufferSizeInBytes() const {
  return buffer_size_in_bytes_;
}
}
} | #include "tsl/profiler/utils/buffer_pool.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
// Two consecutive gets with nothing reclaimed must yield distinct buffers,
// each fully writable.
TEST(BufferPoolTest, GetOrCreateBufferAlloc) {
  constexpr size_t kBufferSizeInBytes = 32 * 1024;
  BufferPool buffer_pool(kBufferSizeInBytes);
  uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
  EXPECT_NE(first_buffer, nullptr);
  uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
  EXPECT_NE(second_buffer, first_buffer);
  for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
    first_buffer[idx] = 0xAB;
  }
  buffer_pool.ReclaimBuffer(first_buffer);
  buffer_pool.ReclaimBuffer(second_buffer);
}
// A reclaimed buffer must be handed back by the next get, unchanged address.
TEST(BufferPoolTest, GetOrCreateBufferReuse) {
  constexpr size_t kBufferSizeInBytes = 32 * 1024;
  BufferPool buffer_pool(kBufferSizeInBytes);
  uint8_t* buffer = buffer_pool.GetOrCreateBuffer();
  EXPECT_NE(buffer, nullptr);
  buffer[0] = 0xFF;
  uint8_t* previous_buffer = buffer;
  buffer_pool.ReclaimBuffer(buffer);
  uint8_t* reused_buffer = buffer_pool.GetOrCreateBuffer();
  EXPECT_EQ(reused_buffer, previous_buffer);
  for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
    reused_buffer[idx] = 0xCD;
  }
  buffer_pool.ReclaimBuffer(reused_buffer);
}
// DestroyAllBuffers must not touch checked-out buffers: `first_buffer` stays
// writable afterwards, and the pool can still allocate new buffers.
TEST(BufferPoolTest, DestroyAllBuffers) {
  constexpr size_t kBufferSizeInBytes = 32 * 1024;
  BufferPool buffer_pool(kBufferSizeInBytes);
  uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
  EXPECT_NE(first_buffer, nullptr);
  buffer_pool.DestroyAllBuffers();
  for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
    first_buffer[idx] = 0xEF;
  }
  uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
  for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
    second_buffer[idx] = 0xAB;
  }
  buffer_pool.ReclaimBuffer(first_buffer);
  buffer_pool.ReclaimBuffer(second_buffer);
}
}
}
} | #include "tsl/profiler/utils/buffer_pool.h"
#include <ios>
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
namespace tsl {
namespace profiler {
BufferPool::BufferPool(size_t buffer_size_in_bytes)
: buffer_size_in_bytes_(buffer_size_in_bytes) {} | #include "tsl/profiler/utils/buffer_pool.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(BufferPoolTest, GetOrCreateBufferAlloc) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(second_buffer, first_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
}
TEST(BufferPoolTest, GetOrCreateBufferReuse) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(buffer, nullptr);
buffer[0] = 0xFF;
uint8_t* previous_buffer = buffer;
buffer_pool.ReclaimBuffer(buffer);
uint8_t* reused_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_EQ(reused_buffer, previous_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
reused_buffer[idx] = 0xCD;
}
buffer_pool.ReclaimBuffer(reused_buffer);
}
TEST(BufferPoolTest, DestroyAllBuffers) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
buffer_pool.DestroyAllBuffers();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xEF;
}
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
second_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
} |
#include "tsl/lib/core/bitmap.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/numeric/bits.h"
namespace tsl {
namespace core {
void Bitmap::Reset(size_t n) {
const size_t num_words = NumWords(n);
if (num_words != NumWords(nbits_)) {
Word* w = new Word[num_words];
delete[] word_;
word_ = w;
}
memset(word_, 0, sizeof(word_[0]) * num_words);
nbits_ = n;
}
// Returns the 1-based index of the least-significant set bit of `w`,
// or 0 when `w` is zero (the classic ffs() contract).
static size_t FindFirstSet(uint32_t w) {
  if (w == 0) {
    return 0;
  }
  size_t index = 1;
  while ((w & 1u) == 0) {
    w >>= 1;
    ++index;
  }
  return index;
}
// Returns the index of the first 0-bit at or after `start`, or `nbits_`
// when every bit in [start, nbits_) is set (and when `start` is out of
// range).
size_t Bitmap::FirstUnset(size_t start) const {
  if (start >= nbits_) {
    return nbits_;
  }
  // Mask of the bit positions below `start` within its word; OR-ing it
  // in makes those positions look "set" so they are skipped when
  // scanning the first word.
  size_t mask = (1ull << (start % kBits)) - 1;
  const size_t nwords = NumWords(nbits_);
  for (size_t i = start / kBits; i < nwords; i++) {
    Word word = word_[i] | mask;
    mask = 0;  // Only the first word needs the partial mask.
    // ~word has a 1 exactly where the bitmap has a 0.
    size_t r = FindFirstSet(~word);
    if (r) {
      size_t result = i * kBits + (r - 1);
      // The last word may extend past nbits_; clamp to the bit count.
      if (result > nbits_) result = nbits_;
      return result;
    }
  }
  return nbits_;
}
// Renders the bitmap as a string of '0'/'1' characters, one character
// per bit, with bit 0 first.
std::string Bitmap::ToString() const {
  std::string out(bits(), '0');
  for (size_t pos = 0; pos < nbits_; ++pos) {
    if (get(pos)) out[pos] = '1';
  }
  return out;
}
}
} | #include "tsl/lib/core/bitmap.h"
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
size_t NextSize(size_t n) { return n + ((n < 75) ? 1 : 25); }
// Resets `bitmap` to a random size in [0, 200) and sets each bit
// independently with probability 1/2.
static void MakeRandomBitmap(random::SimplePhilox* rnd, Bitmap* bitmap) {
  size_t n = rnd->Uniform(200);
  bitmap->Reset(n);
  for (size_t i = 0; i < n; i++) {
    if (rnd->OneIn(2)) bitmap->set(i);
  }
}
TEST(BitmapTest, Basic) {
for (size_t n = 0; n < 200; n = NextSize(n)) {
Bitmap bits(n);
for (size_t i = 0; i < n; i++) {
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.set(i);
EXPECT_TRUE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.clear(i);
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
}
}
}
TEST(BitmapTest, ToString) {
Bitmap bits(10);
bits.set(1);
bits.set(3);
EXPECT_EQ(bits.ToString(), "0101000000");
}
// Exhaustively covers bit patterns of the form: `p` leading 1s followed
// by repeating groups of one 0 and `q` 1s, for all sizes n in [0, 200).
TEST(BitmapTest, FirstUnset) {
  for (size_t n = 0; n < 200; n = NextSize(n)) {
    for (size_t p = 0; p <= 100; p++) {
      for (size_t q = 0; q <= 100; q++) {
        Bitmap bitmap(n);
        int one_count = 0;
        size_t i = 0;
        // Set the first p bits.
        while (i < p && i < n) {
          one_count++;
          bitmap.set(i);
          i++;
        }
        // Then alternate: leave one bit clear, set the next q bits.
        while (i < n) {
          i++;
          for (size_t j = 0; j < q && i < n; j++, i++) {
            one_count++;
            bitmap.set(i);
          }
        }
        // Walk FirstUnset across the bitmap and verify it visits
        // exactly the zero bits.
        int seen = 0;
        size_t pos = 0;
        while (true) {
          pos = bitmap.FirstUnset(pos);
          if (pos == n) break;
          ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
          seen++;
          pos++;
        }
        EXPECT_EQ(seen, n - one_count) << " " << bitmap.ToString();
      }
    }
  }
}
TEST(BitmapTest, FirstUnsetRandom) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int iter = 0; iter < 10000; iter++) {
Bitmap bitmap;
MakeRandomBitmap(&rnd, &bitmap);
size_t zero_bits = 0;
for (size_t i = 0; i < bitmap.bits(); i++) {
if (!bitmap.get(i)) zero_bits++;
}
int seen = 0;
size_t pos = 0;
while (true) {
pos = bitmap.FirstUnset(pos);
if (pos == bitmap.bits()) break;
ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
seen++;
pos++;
}
EXPECT_EQ(seen, zero_bits) << " " << bitmap.ToString();
}
}
}
}
} | std::string Bitmap::ToString() const {
std::string result;
result.resize(bits());
for (size_t i = 0; i < nbits_; i++) {
result[i] = get(i) ? '1' : '0';
}
return result;
} | TEST(BitmapTest, Basic) {
for (size_t n = 0; n < 200; n = NextSize(n)) {
Bitmap bits(n);
for (size_t i = 0; i < n; i++) {
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.set(i);
EXPECT_TRUE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.clear(i);
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
}
}
}
TEST(BitmapTest, ToString) {
Bitmap bits(10);
bits.set(1);
bits.set(3);
EXPECT_EQ(bits.ToString(), "0101000000");
}
TEST(BitmapTest, FirstUnset) {
for (size_t n = 0; n < 200; n = NextSize(n)) {
for (size_t p = 0; p <= 100; p++) {
for (size_t q = 0; q <= 100; q++) {
Bitmap bitmap(n);
int one_count = 0;
size_t i = 0;
while (i < p && i < n) {
one_count++;
bitmap.set(i);
i++;
}
while (i < n) {
i++;
for (size_t j = 0; j < q && i < n; j++, i++) {
one_count++;
bitmap.set(i);
}
}
int seen = 0;
size_t pos = 0;
while (true) {
pos = bitmap.FirstUnset(pos);
if (pos == n) break;
ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
seen++;
pos++;
}
EXPECT_EQ(seen, n - one_count) << " " << bitmap.ToString();
}
}
}
}
TEST(BitmapTest, FirstUnsetRandom) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int iter = 0; iter < 10000; iter++) {
Bitmap bitmap;
MakeRandomBitmap(&rnd, &bitmap);
size_t zero_bits = 0;
for (size_t i = 0; i < bitmap.bits(); i++) {
if (!bitmap.get(i)) zero_bits++;
}
int seen = 0;
size_t pos = 0;
while (true) {
pos = bitmap.FirstUnset(pos);
if (pos == bitmap.bits()) break;
ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
seen++;
pos++;
}
EXPECT_EQ(seen, zero_bits) << " " << bitmap.ToString();
}
} |
#include "arolla/expr/annotation_utils.h"
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
// Returns true iff `node` is an annotation node: its (decayed) operator
// carries AnnotationExprOperatorTag and the node has at least one
// dependency (the annotated expression). Errors from operator decay are
// propagated.
absl::StatusOr<bool> IsAnnotation(const ExprNodePtr& node) {
  ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
  const bool has_deps = !node->node_deps().empty();
  const bool tagged =
      dynamic_cast<const AnnotationExprOperatorTag*>(op.get()) != nullptr;
  return has_deps && tagged;
}
absl::StatusOr<bool> IsDetachedAnnotation(const ExprNodePtr& node) {
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
DCHECK(!is_annotation ||
!node->node_deps().empty());
return is_annotation && node->node_deps()[0]->is_placeholder();
}
// Produces a "detached" copy of annotation `node`: its annotated
// expression (first dependency) is replaced by the placeholder P("_"),
// so that the annotation can later be re-attached to another expression.
absl::StatusOr<ExprNodePtr> GetDetachedAnnotation(ExprNodePtr node) {
  ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
  if (!is_annotation) {
    return absl::InvalidArgumentError(
        absl::StrCat("can not detach annotation from ", GetDebugSnippet(node),
                     " that is not a valid annotation node"));
  }
  auto deps = node->node_deps();
  DCHECK(!deps.empty());
  deps[0] = Placeholder("_");
  return WithNewDependencies(node, std::move(deps));
}
// Re-attaches a detached annotation on top of `node`.
//
// `annotation` must be a detached annotation (its first dependency is a
// placeholder, see GetDetachedAnnotation); the placeholder is replaced
// with `node` and the resulting annotation node is returned. Returns
// InvalidArgumentError when `annotation` is not a detached annotation.
absl::StatusOr<ExprNodePtr> AttachAnnotation(const ExprNodePtr& node,
                                             const ExprNodePtr& annotation) {
  ASSIGN_OR_RETURN(bool is_detached_annotation,
                   IsDetachedAnnotation(annotation));
  if (!is_detached_annotation) {
    // Bug fix: the message previously embedded a literal "%s" because
    // absl::StrCat does not interpret printf-style placeholders.
    return absl::InvalidArgumentError(absl::StrCat(
        "can not attach a node that is not a detached annotation: ",
        GetDebugSnippet(node)));
  }
  auto new_deps = annotation->node_deps();
  DCHECK(!new_deps.empty());
  new_deps[0] = node;
  return WithNewDependencies(annotation, std::move(new_deps));
}
absl::StatusOr<ExprNodePtr> AttachAnnotations(
const ExprNodePtr& node, absl::Span<const ExprNodePtr> annotations) {
ExprNodePtr annotated_node = node;
for (const auto& anno : annotations) {
ASSIGN_OR_RETURN(annotated_node, AttachAnnotation(annotated_node, anno));
}
return annotated_node;
}
// Descends through the chain of annotations wrapping `expr` and returns
// the first node that is not itself an annotation. Inner annotations
// (below the first non-annotation node) are left untouched.
absl::StatusOr<ExprNodePtr> StripTopmostAnnotations(const ExprNodePtr& expr) {
  ExprNodePtr current = expr;
  while (true) {
    ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(current));
    if (!is_annotation) {
      break;
    }
    if (current->node_deps().empty()) {
      return absl::FailedPreconditionError(absl::StrFormat(
          "incorrect annotation node %s", GetDebugSnippet(current)));
    }
    current = current->node_deps()[0];
  }
  return current;
}
// Removes every annotation node anywhere in `expr`, replacing each
// annotation with the expression it annotates (its first dependency).
absl::StatusOr<ExprNodePtr> StripAnnotations(const ExprNodePtr& expr) {
  auto drop_annotation =
      [](const ExprNodePtr& node) -> absl::StatusOr<ExprNodePtr> {
    ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
    if (!is_annotation) {
      return node;
    }
    DCHECK(!node->node_deps().empty());
    return node->node_deps()[0];
  };
  return Transform(expr, drop_annotation);
}
bool IsQTypeAnnotation(const ExprNodePtr& node) {
auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
return op != nullptr && typeid(*op) == typeid(QTypeAnnotation) &&
node->node_deps().size() == 2;
}
bool IsNameAnnotation(const ExprNodePtr& node) {
auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
return op != nullptr && typeid(*op) == typeid(NameAnnotation) &&
node->node_deps().size() == 2;
}
// Returns true iff `node` is an export annotation: either
// annotation.export (2 deps: annotated expr, tag) or
// annotation.export_value (3 deps: annotated expr, tag, value).
bool IsExportAnnotation(const ExprNodePtr& node) {
  auto op = DecayRegisteredOperator(node->op()).value_or(nullptr);
  return op != nullptr && ((typeid(*op) == typeid(ExportAnnotation) &&
                            node->node_deps().size() == 2) ||
                           (typeid(*op) == typeid(ExportValueAnnotation) &&
                            node->node_deps().size() == 3));
}
const QType* ReadQTypeAnnotation(const ExprNodePtr& node) {
if (IsQTypeAnnotation(node)) {
DCHECK_EQ(node->node_deps().size(), 2);
if (const auto& qvalue = node->node_deps()[1]->qvalue()) {
if (qvalue->GetType() == GetQTypeQType()) {
return qvalue->UnsafeAs<QTypePtr>();
}
}
}
return nullptr;
}
absl::string_view ReadNameAnnotation(const ExprNodePtr& node) {
if (IsNameAnnotation(node)) {
DCHECK_EQ(node->node_deps().size(), 2);
if (const auto& qvalue = node->node_deps()[1]->qvalue()) {
if (qvalue->GetType() == GetQType<Text>()) {
return qvalue->UnsafeAs<Text>().view();
}
}
}
return "";
}
absl::string_view ReadExportAnnotationTag(const ExprNodePtr& node) {
if (IsExportAnnotation(node)) {
DCHECK_GE(node->node_deps().size(), 2);
if (node->node_deps()[1]->qvalue().has_value() &&
node->node_deps()[1]->qvalue()->GetType() == GetQType<Text>()) {
return node->node_deps()[1]->qvalue()->UnsafeAs<Text>().view();
}
}
return {};
}
// Returns the exported value of an export annotation, or nullptr when
// `node` is not an export annotation.
ExprNodePtr ReadExportAnnotationValue(const ExprNodePtr& node) {
  if (!IsExportAnnotation(node)) {
    return nullptr;
  }
  const auto& deps = node->node_deps();
  switch (deps.size()) {
    case 2:
      return deps[0];  // annotation.export: value is the annotated expr.
    case 3:
      return deps[2];  // annotation.export_value: explicit value operand.
    default:
      return nullptr;
  }
}
} | #include "arolla/expr/annotation_utils.h"
#include <memory>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
class AnnotationOperatorTest : public ::testing::Test {
void SetUp() override { ASSERT_OK(InitArolla()); }
};
class IdentityAnnotation : public AnnotationExprOperatorTag,
public BasicExprOperator {
public:
IdentityAnnotation()
: BasicExprOperator(
"id", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::IdentityAnnotation").Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
return input_qtypes[0];
}
};
TEST_F(AnnotationOperatorTest, SmokeTest) {
const auto with_annotation = std::make_shared<IdentityAnnotation>();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(with_annotation, {Leaf("x")}));
ASSERT_OK_AND_ASSIGN(auto lower_expr, ToLowerNode(expr));
EXPECT_THAT(lower_expr, EqualsExpr(expr));
ASSERT_OK_AND_ASSIGN(auto typed_expr,
CallOp(with_annotation, {Literal<float>(1.0)}));
EXPECT_EQ(typed_expr->qtype(), GetQType<float>());
}
TEST_F(AnnotationOperatorTest, StripAnnotations) {
const auto id_anno = std::make_shared<IdentityAnnotation>();
{
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp(id_anno, {CallOp("math.add",
{CallOp(id_anno, {Leaf("x")}), Leaf("y")})}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr actual, StripAnnotations(expr));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected,
CallOp("math.add", {Leaf("x"), Leaf("y")}));
EXPECT_THAT(actual, EqualsExpr(expected));
}
{
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp(id_anno, {CallOp(id_anno, {Leaf("x")})}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr actual, StripAnnotations(expr));
ExprNodePtr expected = Leaf("x");
EXPECT_THAT(actual, EqualsExpr(expected));
}
}
TEST_F(AnnotationOperatorTest, StripTopmostAnnotations) {
const auto id_anno = std::make_shared<IdentityAnnotation>();
{
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp(id_anno, {CallOp("math.add",
{CallOp(id_anno, {Leaf("x")}), Leaf("y")})}));
EXPECT_THAT(StripTopmostAnnotations(expr),
IsOkAndHolds(EqualsExpr(CallOp(
"math.add", {CallOp(id_anno, {Leaf("x")}), Leaf("y")}))));
}
{
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp(id_anno, {CallOp(id_anno, {Leaf("x")})}));
EXPECT_THAT(StripTopmostAnnotations(expr),
IsOkAndHolds(EqualsExpr(Leaf("x"))));
}
}
class IdentityAnnotation2 : public AnnotationExprOperatorTag,
public BasicExprOperator {
public:
IdentityAnnotation2()
: BasicExprOperator(
"id2", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::IdentityAnnotation2").Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
return input_qtypes[0];
}
};
TEST_F(AnnotationOperatorTest, AttachAnnotations) {
ExprNodePtr expr = Leaf("x");
EXPECT_THAT(AttachAnnotation(expr, expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not a detached annotation")));
const auto id_anno = std::make_shared<IdentityAnnotation>();
const auto id_anno2 = std::make_shared<IdentityAnnotation2>();
ASSERT_OK_AND_ASSIGN(auto anno1, CallOp(id_anno, {Placeholder("_")}));
ASSERT_OK_AND_ASSIGN(auto anno2, CallOp(id_anno2, {Placeholder("_")}));
std::vector<ExprNodePtr> annotations = {anno1, anno2};
ASSERT_OK_AND_ASSIGN(auto anno_expr, AttachAnnotations(expr, annotations));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_anno_expr,
CallOp(id_anno2, {CallOp(id_anno, {Leaf("x")})}));
EXPECT_THAT(anno_expr, EqualsExpr(expected_anno_expr));
ASSERT_OK_AND_ASSIGN(auto detached, StripAnnotations(anno_expr));
EXPECT_THAT(detached, EqualsExpr(expr));
}
TEST_F(AnnotationOperatorTest, AnnotationExport) {
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp(ExportAnnotation::Make(), {Leaf("a"), Literal(Text{"b"})}));
ASSERT_TRUE(IsExportAnnotation(expr));
auto expected_value = Leaf("a");
EXPECT_THAT(ReadExportAnnotationTag(expr), Eq("b"));
EXPECT_THAT(ReadExportAnnotationValue(expr), EqualsExpr(expected_value));
}
TEST_F(AnnotationOperatorTest, AnnotationExportValue) {
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp(ExportValueAnnotation::Make(),
{Leaf("a"), Literal(Text{"b"}), Leaf("c")}));
ASSERT_TRUE(IsExportAnnotation(expr));
auto expected_value = Leaf("c");
EXPECT_THAT(ReadExportAnnotationTag(expr), Eq("b"));
EXPECT_THAT(ReadExportAnnotationValue(expr), EqualsExpr(expected_value));
}
TEST_F(AnnotationOperatorTest, AnnotationExportArbitraryNode) {
ExprNodePtr expr = Leaf("a");
ASSERT_FALSE(IsExportAnnotation(expr));
EXPECT_EQ(ReadExportAnnotationTag(expr), "");
EXPECT_EQ(ReadExportAnnotationValue(expr), nullptr);
}
}
} | absl::StatusOr<ExprNodePtr> StripAnnotations(const ExprNodePtr& expr) {
return Transform(
expr, [](const ExprNodePtr& node) -> absl::StatusOr<ExprNodePtr> {
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
DCHECK(!is_annotation ||
!node->node_deps().empty());
return is_annotation ? node->node_deps()[0] : node;
});
} | TEST_F(AnnotationOperatorTest, StripAnnotations) {
const auto id_anno = std::make_shared<IdentityAnnotation>();
{
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp(id_anno, {CallOp("math.add",
{CallOp(id_anno, {Leaf("x")}), Leaf("y")})}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr actual, StripAnnotations(expr));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected,
CallOp("math.add", {Leaf("x"), Leaf("y")}));
EXPECT_THAT(actual, EqualsExpr(expected));
}
{
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp(id_anno, {CallOp(id_anno, {Leaf("x")})}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr actual, StripAnnotations(expr));
ExprNodePtr expected = Leaf("x");
EXPECT_THAT(actual, EqualsExpr(expected));
}
}
TEST_F(AnnotationOperatorTest, AttachAnnotations) {
ExprNodePtr expr = Leaf("x");
EXPECT_THAT(AttachAnnotation(expr, expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not a detached annotation")));
const auto id_anno = std::make_shared<IdentityAnnotation>();
const auto id_anno2 = std::make_shared<IdentityAnnotation2>();
ASSERT_OK_AND_ASSIGN(auto anno1, CallOp(id_anno, {Placeholder("_")}));
ASSERT_OK_AND_ASSIGN(auto anno2, CallOp(id_anno2, {Placeholder("_")}));
std::vector<ExprNodePtr> annotations = {anno1, anno2};
ASSERT_OK_AND_ASSIGN(auto anno_expr, AttachAnnotations(expr, annotations));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_anno_expr,
CallOp(id_anno2, {CallOp(id_anno, {Leaf("x")})}));
EXPECT_THAT(anno_expr, EqualsExpr(expected_anno_expr));
ASSERT_OK_AND_ASSIGN(auto detached, StripAnnotations(anno_expr));
EXPECT_THAT(detached, EqualsExpr(expr));
} |
#include "tsl/profiler/convert/trace_events_to_json.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/utils/format_utils.h"
#include "tsl/profiler/utils/math_utils.h"
namespace tsl {
namespace profiler {
namespace {
inline std::string PicosToMicrosString(uint64 ps) {
return MaxPrecision(PicoToMicro(ps));
}
// Quotes and escapes `s` as a JSON string literal, including the
// surrounding double quotes.
// NOTE(review): valueToQuotedString takes a C string, so `s` would be
// truncated at an embedded NUL byte — presumably event/device names
// never contain NULs; confirm with callers.
inline std::string JsonString(const std::string& s) {
  return Json::valueToQuotedString(s.c_str());
}
// Returns pointers to the entries of `m`, ordered by ascending key.
// Gives deterministic iteration over unordered (e.g. proto map)
// containers; only pointers are copied, never the entries themselves.
template <typename Map>
std::vector<const typename Map::value_type*> SortByKey(const Map& m) {
  std::vector<const typename Map::value_type*> entries;
  entries.reserve(m.size());
  for (const auto& entry : m) {
    entries.push_back(&entry);
  }
  std::sort(entries.begin(), entries.end(),
            [](const typename Map::value_type* lhs,
               const typename Map::value_type* rhs) {
              return lhs->first < rhs->first;
            });
  return entries;
}
inline void AddDeviceMetadata(uint32 device_id, const Device& device,
std::string* json) {
if (!device.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_name","args":{"name":)",
JsonString(device.name()), "}},");
}
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_sort_index","args":{"sort_index":)",
device_id, "}},");
}
// Appends Chrome-trace metadata events that name and order the thread
// ("tid") for `resource` under process `device_id`. Each appended JSON
// object ends with a trailing comma; the caller closes the array.
inline void AddResourceMetadata(uint32 device_id, uint32 resource_id,
                                const Resource& resource, std::string* json) {
  if (!resource.name().empty()) {
    absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
                    resource_id, R"(,"name":"thread_name","args":{"name":)",
                    JsonString(resource.name()), "}},");
  }
  // A zero sort_index means "unspecified"; fall back to the resource id.
  uint32 sort_index =
      resource.sort_index() ? resource.sort_index() : resource_id;
  absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
                  resource_id, R"(,"name":"thread_sort_index")",
                  R"(,"args":{"sort_index":)", sort_index, "}},");
}
// Appends one complete ("ph":"X") trace event as a JSON object followed
// by a trailing comma.
inline void AddTraceEvent(const TraceEvent& event, string* json) {
  // Chrome trace viewer hides zero-duration events, so clamp to 1 ps.
  auto duration_ps = std::max(event.duration_ps(), protobuf_uint64{1});
  absl::StrAppend(json, R"({"ph":"X","pid":)", event.device_id(), R"(,"tid":)",
                  event.resource_id(), R"(,"ts":)",
                  PicosToMicrosString(event.timestamp_ps()), R"(,"dur":)",
                  PicosToMicrosString(duration_ps), R"(,"name":)",
                  JsonString(event.name()));
  if (!event.args().empty()) {
    absl::StrAppend(json, R"(,"args":{)");
    // Sort args so the output is deterministic across runs.
    for (const auto* arg : SortByKey(event.args())) {
      absl::StrAppend(json, JsonString(arg->first), ":",
                      JsonString(arg->second), ",");
    }
    // Overwrite the final trailing comma with the closing brace.
    json->back() = '}';
  }
  absl::StrAppend(json, "},");
}
}
// Converts `container` to a Chrome trace-viewer JSON string: device and
// resource metadata first (sorted by id for determinism), then all
// events. A final empty object absorbs the last trailing comma so the
// "traceEvents" array stays valid JSON.
std::string TraceContainerToJson(const TraceContainer& container) {
  std::string json =
      R"({"displayTimeUnit":"ns","metadata":{"highres-ticks":true},)"
      R"("traceEvents":[)";
  for (const auto* id_and_device : SortByKey(container.trace().devices())) {
    uint32 device_id = id_and_device->first;
    const Device& device = id_and_device->second;
    AddDeviceMetadata(device_id, device, &json);
    for (const auto* id_and_resource : SortByKey(device.resources())) {
      uint32 resource_id = id_and_resource->first;
      const Resource& resource = id_and_resource->second;
      AddResourceMetadata(device_id, resource_id, resource, &json);
    }
  }
  for (const TraceEvent* const event : container.UnsortedEvents()) {
    AddTraceEvent(*event, &json);
  }
  absl::StrAppend(&json, "{}]}");
  return json;
}
}
} | #include "tsl/profiler/convert/trace_events_to_json.h"
#include <string>
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/convert/trace_container.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
namespace tsl {
namespace profiler {
namespace {
Json::Value ToJsonValue(const std::string& json_str) {
Json::Value json;
Json::Reader reader;
EXPECT_TRUE(reader.parse(json_str, json));
return json;
}
TEST(TraceEventsToJson, JsonConversion) {
const std::string metadata_string = R"pb(
devices {
key: 2
value {
name: 'D2'
device_id: 2
resources {
key: 2
value { resource_id: 2 name: 'R2.2' }
}
}
}
devices {
key: 1
value {
name: 'D1'
device_id: 1
resources {
key: 2
value { resource_id: 1 name: 'R1.2' }
}
}
}
)pb";
TraceContainer container;
EXPECT_TRUE(container.ParseMetadataFromString(metadata_string));
TraceEvent* event = container.CreateEvent();
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("E1.2.1");
event->set_timestamp_ps(100000);
event->set_duration_ps(10000);
event->mutable_args()->insert({"long_name", "E1.2.1 long"});
event->mutable_args()->insert({"arg2", "arg2 val"});
event = container.CreateEvent();
event->set_device_id(2);
event->set_resource_id(2);
event->set_name("E2.2.1 # \"comment\"");
event->set_timestamp_ps(105000);
container.CapEvents(2);
Json::Value json = ToJsonValue(TraceContainerToJson(container));
Json::Value expected_json = ToJsonValue(R"(
{
"displayTimeUnit": "ns",
"metadata": { "highres-ticks": true },
"traceEvents": [
{"ph":"M", "pid":1, "name":"process_name", "args":{"name":"D1"}},
{"ph":"M", "pid":1, "name":"process_sort_index", "args":{"sort_index":1}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_name",
"args":{"name":"R1.2"}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{"ph":"M", "pid":2, "name":"process_name", "args":{"name":"D2"}},
{"ph":"M", "pid":2, "name":"process_sort_index", "args":{"sort_index":2}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_name",
"args":{"name":"R2.2"}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{
"ph" : "X",
"pid" : 1,
"tid" : 2,
"name" : "E1.2.1",
"ts" : 0.1,
"dur" : 0.01,
"args" : {"arg2": "arg2 val", "long_name": "E1.2.1 long"}
},
{
"ph" : "X",
"pid" : 2,
"tid" : 2,
"name" : "E2.2.1 # \"comment\"",
"ts" : 0.105,
"dur" : 1e-6
},
{}
]
})");
EXPECT_EQ(json, expected_json);
}
}
}
} | inline void AddResourceMetadata(uint32 device_id, uint32 resource_id,
const Resource& resource, std::string* json) {
if (!resource.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_name","args":{"name":)",
JsonString(resource.name()), "}},");
}
uint32 sort_index =
resource.sort_index() ? resource.sort_index() : resource_id;
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_sort_index")",
R"(,"args":{"sort_index":)", sort_index, "}},");
} | TEST(TraceEventsToJson, JsonConversion) {
const std::string metadata_string = R"pb(
devices {
key: 2
value {
name: 'D2'
device_id: 2
resources {
key: 2
value { resource_id: 2 name: 'R2.2' }
}
}
}
devices {
key: 1
value {
name: 'D1'
device_id: 1
resources {
key: 2
value { resource_id: 1 name: 'R1.2' }
}
}
}
)pb";
TraceContainer container;
EXPECT_TRUE(container.ParseMetadataFromString(metadata_string));
TraceEvent* event = container.CreateEvent();
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("E1.2.1");
event->set_timestamp_ps(100000);
event->set_duration_ps(10000);
event->mutable_args()->insert({"long_name", "E1.2.1 long"});
event->mutable_args()->insert({"arg2", "arg2 val"});
event = container.CreateEvent();
event->set_device_id(2);
event->set_resource_id(2);
event->set_name("E2.2.1 # \"comment\"");
event->set_timestamp_ps(105000);
container.CapEvents(2);
Json::Value json = ToJsonValue(TraceContainerToJson(container));
Json::Value expected_json = ToJsonValue(R"(
{
"displayTimeUnit": "ns",
"metadata": { "highres-ticks": true },
"traceEvents": [
{"ph":"M", "pid":1, "name":"process_name", "args":{"name":"D1"}},
{"ph":"M", "pid":1, "name":"process_sort_index", "args":{"sort_index":1}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_name",
"args":{"name":"R1.2"}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{"ph":"M", "pid":2, "name":"process_name", "args":{"name":"D2"}},
{"ph":"M", "pid":2, "name":"process_sort_index", "args":{"sort_index":2}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_name",
"args":{"name":"R2.2"}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{
"ph" : "X",
"pid" : 1,
"tid" : 2,
"name" : "E1.2.1",
"ts" : 0.1,
"dur" : 0.01,
"args" : {"arg2": "arg2 val", "long_name": "E1.2.1 long"}
},
{
"ph" : "X",
"pid" : 2,
"tid" : 2,
"name" : "E2.2.1 # \"comment\"",
"ts" : 0.105,
"dur" : 1e-6
},
{}
]
})");
EXPECT_EQ(json, expected_json);
} |
#include "quiche/common/quiche_simple_arena.h"
#include <algorithm>
#include <cstring>
#include <utility>
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
QuicheSimpleArena::QuicheSimpleArena(size_t block_size)
: block_size_(block_size) {}
QuicheSimpleArena::~QuicheSimpleArena() = default;
QuicheSimpleArena::QuicheSimpleArena(QuicheSimpleArena&& other) = default;
QuicheSimpleArena& QuicheSimpleArena::operator=(QuicheSimpleArena&& other) =
default;
char* QuicheSimpleArena::Alloc(size_t size) {
Reserve(size);
Block& b = blocks_.back();
QUICHE_DCHECK_GE(b.size, b.used + size);
char* out = b.data.get() + b.used;
b.used += size;
return out;
}
// Grows (or shrinks) the allocation at `original` from `oldsize` to
// `newsize` bytes. When `original` is the most recent allocation in the
// last block and the block has room, the allocation is extended in
// place; otherwise fresh memory is allocated and `oldsize` bytes are
// copied over. Requires that at least one allocation has been made.
char* QuicheSimpleArena::Realloc(char* original, size_t oldsize,
                                 size_t newsize) {
  QUICHE_DCHECK(!blocks_.empty());
  Block& last = blocks_.back();
  if (last.data.get() <= original && original < last.data.get() + last.size) {
    // `original` lies inside the last block.
    QUICHE_DCHECK_GE(last.data.get() + last.used, original + oldsize);
    if (original + oldsize == last.data.get() + last.used) {
      // `original` is the most recent allocation; try extending in place.
      if (original + newsize < last.data.get() + last.size) {
        last.used += newsize - oldsize;
        return original;
      }
    }
  }
  // Fall back to allocate-and-copy (possibly into a new block).
  char* out = Alloc(newsize);
  memcpy(out, original, oldsize);
  return out;
}
// Copies `size` bytes from `data` into freshly allocated arena storage
// and returns a pointer to the copy.
char* QuicheSimpleArena::Memdup(const char* data, size_t size) {
  char* copy = Alloc(size);
  memcpy(copy, data, size);
  return copy;
}
// Returns `size` bytes to the arena, but only if [data, data + size) is
// exactly the most recent allocation in the last block; otherwise the
// call is a no-op and the memory is reclaimed when the arena is reset
// or destroyed.
void QuicheSimpleArena::Free(char* data, size_t size) {
  if (blocks_.empty()) {
    return;
  }
  Block& b = blocks_.back();
  if (size <= b.used && data + size == b.data.get() + b.used) {
    // Only the tail allocation of the active block can be rewound.
    b.used -= size;
  }
}
void QuicheSimpleArena::Reset() {
blocks_.clear();
status_.bytes_allocated_ = 0;
}
void QuicheSimpleArena::Reserve(size_t additional_space) {
if (blocks_.empty()) {
AllocBlock(std::max(additional_space, block_size_));
} else {
const Block& last = blocks_.back();
if (last.size < last.used + additional_space) {
AllocBlock(std::max(additional_space, block_size_));
}
}
}
void QuicheSimpleArena::AllocBlock(size_t size) {
blocks_.push_back(Block(size));
status_.bytes_allocated_ += size;
}
QuicheSimpleArena::Block::Block(size_t s)
: data(new char[s]), size(s), used(0) {}
QuicheSimpleArena::Block::~Block() = default;
QuicheSimpleArena::Block::Block(QuicheSimpleArena::Block&& other)
: size(other.size), used(other.used) {
data = std::move(other.data);
}
QuicheSimpleArena::Block& QuicheSimpleArena::Block::operator=(
QuicheSimpleArena::Block&& other) {
size = other.size;
used = other.used;
data = std::move(other.data);
return *this;
}
} | #include "quiche/common/quiche_simple_arena.h"
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
size_t kDefaultBlockSize = 2048;
const char kTestString[] = "This is a decently long test string.";
TEST(QuicheSimpleArenaTest, NoAllocationOnConstruction) {
QuicheSimpleArena arena(kDefaultBlockSize);
EXPECT_EQ(0u, arena.status().bytes_allocated());
}
TEST(QuicheSimpleArenaTest, Memdup) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, MemdupLargeString) {
QuicheSimpleArena arena(10 );
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, MultipleBlocks) {
QuicheSimpleArena arena(40 );
std::vector<std::string> strings = {
"One decently long string.", "Another string.",
"A third string that will surely go in a different block."};
std::vector<absl::string_view> copies;
for (const std::string& s : strings) {
absl::string_view sp(arena.Memdup(s.data(), s.size()), s.size());
copies.push_back(sp);
}
EXPECT_EQ(strings.size(), copies.size());
for (size_t i = 0; i < strings.size(); ++i) {
EXPECT_EQ(copies[i], strings[i]);
}
}
TEST(QuicheSimpleArenaTest, UseAfterReset) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
arena.Reset();
c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, Free) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
arena.Free(const_cast<char*>(kTestString), length);
char* c1 = arena.Memdup("Foo", 3);
char* c2 = arena.Memdup(kTestString, length);
arena.Free(const_cast<char*>(kTestString), length);
char* c3 = arena.Memdup("Bar", 3);
char* c4 = arena.Memdup(kTestString, length);
EXPECT_NE(c1, c2);
EXPECT_NE(c1, c3);
EXPECT_NE(c1, c4);
EXPECT_NE(c2, c3);
EXPECT_NE(c2, c4);
EXPECT_NE(c3, c4);
arena.Free(c4, length);
arena.Free(c2, length);
char* c5 = arena.Memdup("Baz", 3);
EXPECT_EQ(c4, c5);
}
TEST(QuicheSimpleArenaTest, Alloc) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c1 = arena.Alloc(length);
char* c2 = arena.Alloc(2 * length);
char* c3 = arena.Alloc(3 * length);
char* c4 = arena.Memdup(kTestString, length);
EXPECT_EQ(c1 + length, c2);
EXPECT_EQ(c2 + 2 * length, c3);
EXPECT_EQ(c3 + 3 * length, c4);
EXPECT_EQ(absl::string_view(c4, length), kTestString);
}
TEST(QuicheSimpleArenaTest, Realloc) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c1 = arena.Memdup(kTestString, length);
char* c2 = arena.Realloc(c1, length, 2 * length);
EXPECT_TRUE(c1);
EXPECT_EQ(c1, c2);
EXPECT_EQ(absl::string_view(c1, length), kTestString);
char* c3 = arena.Memdup(kTestString, length);
EXPECT_EQ(c2 + 2 * length, c3);
EXPECT_EQ(absl::string_view(c3, length), kTestString);
char* c4 = arena.Realloc(c3, length, 2 * length);
EXPECT_EQ(c3, c4);
EXPECT_EQ(absl::string_view(c4, length), kTestString);
char* c5 = arena.Realloc(c4, 2 * length, 3 * length);
EXPECT_EQ(c4, c5);
EXPECT_EQ(absl::string_view(c5, length), kTestString);
char* c6 = arena.Memdup(kTestString, length);
EXPECT_EQ(c5 + 3 * length, c6);
EXPECT_EQ(absl::string_view(c6, length), kTestString);
char* c7 = arena.Realloc(c6, length, kDefaultBlockSize);
EXPECT_EQ(absl::string_view(c7, length), kTestString);
arena.Free(c7, kDefaultBlockSize);
char* c8 = arena.Memdup(kTestString, length);
EXPECT_NE(c6, c7);
EXPECT_EQ(c7, c8);
EXPECT_EQ(absl::string_view(c8, length), kTestString);
}
}
} | void QuicheSimpleArena::Free(char* data, size_t size) {
if (blocks_.empty()) {
return;
}
Block& b = blocks_.back();
if (size <= b.used && data + size == b.data.get() + b.used) {
b.used -= size;
}
} | TEST(QuicheSimpleArenaTest, Free) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
arena.Free(const_cast<char*>(kTestString), length);
char* c1 = arena.Memdup("Foo", 3);
char* c2 = arena.Memdup(kTestString, length);
arena.Free(const_cast<char*>(kTestString), length);
char* c3 = arena.Memdup("Bar", 3);
char* c4 = arena.Memdup(kTestString, length);
EXPECT_NE(c1, c2);
EXPECT_NE(c1, c3);
EXPECT_NE(c1, c4);
EXPECT_NE(c2, c3);
EXPECT_NE(c2, c4);
EXPECT_NE(c3, c4);
arena.Free(c4, length);
arena.Free(c2, length);
char* c5 = arena.Memdup("Baz", 3);
EXPECT_EQ(c4, c5);
} |
#include "quiche/quic/core/http/quic_headers_stream.h"
#include <algorithm>
#include <utility>
#include "absl/base/macros.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
// Records one contiguous run of compressed header bytes written to the
// headers stream, so later acks and retransmissions can be attributed to
// the header frame (and ack listener) they belong to.
QuicHeadersStream::CompressedHeaderInfo::CompressedHeaderInfo(
    QuicStreamOffset headers_stream_offset, QuicStreamOffset full_length,
    quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
        ack_listener)
    : headers_stream_offset(headers_stream_offset),
      full_length(full_length),
      // Nothing has been acked yet, so the whole run starts unacked.
      unacked_length(full_length),
      ack_listener(std::move(ack_listener)) {}
// CompressedHeaderInfo is stored by value in unacked_headers_, so the
// compiler-generated copy is sufficient.
QuicHeadersStream::CompressedHeaderInfo::CompressedHeaderInfo(
    const CompressedHeaderInfo& other) = default;

QuicHeadersStream::CompressedHeaderInfo::~CompressedHeaderInfo() {}
// The headers stream is a static, bidirectional stream with a fixed,
// version-dependent id; it carries the HTTP/2 framing for all requests
// and responses on the session.
QuicHeadersStream::QuicHeadersStream(QuicSpdySession* session)
    : QuicStream(QuicUtils::GetHeadersStreamId(session->transport_version()),
                 session,
                 /*is_static=*/true, BIDIRECTIONAL),
      spdy_session_(session) {
  // Header data is exempted from connection-level flow control so that
  // this stream's bytes never count against the connection window.
  DisableConnectionFlowControlForThisStream();
}
QuicHeadersStream::~QuicHeadersStream() {}
void QuicHeadersStream::OnDataAvailable() {
struct iovec iov;
while (sequencer()->GetReadableRegion(&iov)) {
if (spdy_session_->ProcessHeaderData(iov) != iov.iov_len) {
return;
}
sequencer()->MarkConsumed(iov.iov_len);
MaybeReleaseSequencerBuffer();
}
}
// Returns the sequencer's receive buffer to the allocator when the session
// says it is no longer needed and the buffer is empty.
void QuicHeadersStream::MaybeReleaseSequencerBuffer() {
  if (!spdy_session_->ShouldReleaseHeadersStreamSequencerBuffer()) {
    return;
  }
  sequencer()->ReleaseBufferIfEmpty();
}
// Attributes newly acked byte ranges to the buffered header frames they
// cover, notifying each frame's ack listener, then defers to QuicStream
// for the generic ack bookkeeping. Returns false (after closing the
// connection) if the accounting is inconsistent.
bool QuicHeadersStream::OnStreamFrameAcked(QuicStreamOffset offset,
                                           QuicByteCount data_length,
                                           bool fin_acked,
                                           QuicTime::Delta ack_delay_time,
                                           QuicTime receive_timestamp,
                                           QuicByteCount* newly_acked_length) {
  // Only process ranges that were not already acked earlier.
  QuicIntervalSet<QuicStreamOffset> newly_acked(offset, offset + data_length);
  newly_acked.Difference(bytes_acked());
  for (const auto& acked : newly_acked) {
    QuicStreamOffset acked_offset = acked.min();
    QuicByteCount acked_length = acked.max() - acked.min();
    // unacked_headers_ is ordered by stream offset; carve the acked
    // interval into per-header-frame pieces as we walk it.
    for (CompressedHeaderInfo& header : unacked_headers_) {
      if (acked_offset < header.headers_stream_offset) {
        // All remaining frames start beyond the acked range.
        break;
      }
      if (acked_offset >= header.headers_stream_offset + header.full_length) {
        // This frame ends before the acked range; look at the next one.
        continue;
      }
      QuicByteCount header_offset = acked_offset - header.headers_stream_offset;
      QuicByteCount header_length =
          std::min(acked_length, header.full_length - header_offset);
      if (header.unacked_length < header_length) {
        // More bytes acked than were outstanding for this frame: internal
        // accounting error, so close the connection.
        QUIC_BUG(quic_bug_10416_1)
            << "Unsent stream data is acked. unacked_length: "
            << header.unacked_length << " acked_length: " << header_length;
        OnUnrecoverableError(QUIC_INTERNAL_ERROR,
                             "Unsent stream data is acked");
        return false;
      }
      if (header.ack_listener != nullptr && header_length > 0) {
        header.ack_listener->OnPacketAcked(header_length, ack_delay_time);
      }
      header.unacked_length -= header_length;
      acked_offset += header_length;
      acked_length -= header_length;
    }
  }
  // Drop fully acked frames from the front to keep the container small.
  while (!unacked_headers_.empty() &&
         unacked_headers_.front().unacked_length == 0) {
    unacked_headers_.pop_front();
  }
  return QuicStream::OnStreamFrameAcked(offset, data_length, fin_acked,
                                        ack_delay_time, receive_timestamp,
                                        newly_acked_length);
}
// Notifies ack listeners about retransmitted header bytes, splitting the
// retransmitted range across the buffered header frames it overlaps.
void QuicHeadersStream::OnStreamFrameRetransmitted(QuicStreamOffset offset,
                                                   QuicByteCount data_length,
                                                   bool /*fin_retransmitted*/) {
  QuicStream::OnStreamFrameRetransmitted(offset, data_length, false);
  for (CompressedHeaderInfo& header : unacked_headers_) {
    if (offset < header.headers_stream_offset) {
      // All remaining frames start beyond the retransmitted range.
      break;
    }
    if (offset >= header.headers_stream_offset + header.full_length) {
      // This frame ends before the retransmitted range.
      continue;
    }
    QuicByteCount header_offset = offset - header.headers_stream_offset;
    QuicByteCount retransmitted_length =
        std::min(data_length, header.full_length - header_offset);
    if (header.ack_listener != nullptr && retransmitted_length > 0) {
      header.ack_listener->OnPacketRetransmitted(retransmitted_length);
    }
    // Advance past the piece attributed to this frame.
    offset += retransmitted_length;
    data_length -= retransmitted_length;
  }
}
// Records a newly buffered run of header bytes for later ack attribution.
// A write that contiguously extends the most recent run and shares its ack
// listener is merged into that run; otherwise a new entry is appended.
void QuicHeadersStream::OnDataBuffered(
    QuicStreamOffset offset, QuicByteCount data_length,
    const quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>&
        ack_listener) {
  if (!unacked_headers_.empty()) {
    CompressedHeaderInfo& last = unacked_headers_.back();
    const bool is_contiguous =
        offset == last.headers_stream_offset + last.full_length;
    if (is_contiguous && ack_listener == last.ack_listener) {
      last.full_length += data_length;
      last.unacked_length += data_length;
      return;
    }
  }
  unacked_headers_.push_back(
      CompressedHeaderInfo(offset, data_length, ack_listener));
}
// The headers stream must never be reset by the peer; any RST_STREAM for
// it is escalated to a connection-level error.
void QuicHeadersStream::OnStreamReset(const QuicRstStreamFrame& /*frame*/) {
  stream_delegate()->OnStreamError(QUIC_INVALID_STREAM_ID,
                                   "Attempt to reset headers stream");
}
} | #include "quiche/quic/core/http/quic_headers_stream.h"
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/http/spdy_utils.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/spdy/core/http2_frame_decoder_adapter.h"
#include "quiche/spdy/core/http2_header_block.h"
#include "quiche/spdy/core/recording_headers_handler.h"
#include "quiche/spdy/core/spdy_alt_svc_wire_format.h"
#include "quiche/spdy/core/spdy_protocol.h"
#include "quiche/spdy/test_tools/spdy_test_utils.h"
using spdy::ERROR_CODE_PROTOCOL_ERROR;
using spdy::Http2HeaderBlock;
using spdy::RecordingHeadersHandler;
using spdy::SETTINGS_ENABLE_PUSH;
using spdy::SETTINGS_HEADER_TABLE_SIZE;
using spdy::SETTINGS_INITIAL_WINDOW_SIZE;
using spdy::SETTINGS_MAX_CONCURRENT_STREAMS;
using spdy::SETTINGS_MAX_FRAME_SIZE;
using spdy::Spdy3PriorityToHttp2Weight;
using spdy::SpdyAltSvcWireFormat;
using spdy::SpdyDataIR;
using spdy::SpdyErrorCode;
using spdy::SpdyFramer;
using spdy::SpdyFramerVisitorInterface;
using spdy::SpdyGoAwayIR;
using spdy::SpdyHeadersHandlerInterface;
using spdy::SpdyHeadersIR;
using spdy::SpdyPingId;
using spdy::SpdyPingIR;
using spdy::SpdyPriority;
using spdy::SpdyPriorityIR;
using spdy::SpdyPushPromiseIR;
using spdy::SpdyRstStreamIR;
using spdy::SpdySerializedFrame;
using spdy::SpdySettingsId;
using spdy::SpdySettingsIR;
using spdy::SpdyStreamId;
using spdy::SpdyWindowUpdateIR;
using testing::_;
using testing::AnyNumber;
using testing::AtLeast;
using testing::InSequence;
using testing::Invoke;
using testing::Return;
using testing::StrictMock;
using testing::WithArgs;
namespace quic {
namespace test {
namespace {
// gMock double for the HTTP/2 deframer's visitor interface. Tests point
// the Http2DecoderAdapter at an instance to assert exactly which frames
// (and fields) were produced by the headers stream's serialized output.
class MockVisitor : public SpdyFramerVisitorInterface {
 public:
  MOCK_METHOD(void, OnError,
              (http2::Http2DecoderAdapter::SpdyFramerError error,
               std::string detailed_error),
              (override));
  MOCK_METHOD(void, OnDataFrameHeader,
              (SpdyStreamId stream_id, size_t length, bool fin), (override));
  MOCK_METHOD(void, OnStreamFrameData,
              (SpdyStreamId stream_id, const char*, size_t len), (override));
  MOCK_METHOD(void, OnStreamEnd, (SpdyStreamId stream_id), (override));
  MOCK_METHOD(void, OnStreamPadding, (SpdyStreamId stream_id, size_t len),
              (override));
  MOCK_METHOD(SpdyHeadersHandlerInterface*, OnHeaderFrameStart,
              (SpdyStreamId stream_id), (override));
  MOCK_METHOD(void, OnHeaderFrameEnd, (SpdyStreamId stream_id), (override));
  MOCK_METHOD(void, OnRstStream,
              (SpdyStreamId stream_id, SpdyErrorCode error_code), (override));
  MOCK_METHOD(void, OnSettings, (), (override));
  MOCK_METHOD(void, OnSetting, (SpdySettingsId id, uint32_t value), (override));
  MOCK_METHOD(void, OnSettingsAck, (), (override));
  MOCK_METHOD(void, OnSettingsEnd, (), (override));
  MOCK_METHOD(void, OnPing, (SpdyPingId unique_id, bool is_ack), (override));
  MOCK_METHOD(void, OnGoAway,
              (SpdyStreamId last_accepted_stream_id, SpdyErrorCode error_code),
              (override));
  MOCK_METHOD(void, OnHeaders,
              (SpdyStreamId stream_id, size_t payload_length, bool has_priority,
               int weight, SpdyStreamId parent_stream_id, bool exclusive,
               bool fin, bool end),
              (override));
  MOCK_METHOD(void, OnWindowUpdate,
              (SpdyStreamId stream_id, int delta_window_size), (override));
  MOCK_METHOD(void, OnPushPromise,
              (SpdyStreamId stream_id, SpdyStreamId promised_stream_id,
               bool end),
              (override));
  MOCK_METHOD(void, OnContinuation,
              (SpdyStreamId stream_id, size_t payload_size, bool end),
              (override));
  MOCK_METHOD(
      void, OnAltSvc,
      (SpdyStreamId stream_id, absl::string_view origin,
       const SpdyAltSvcWireFormat::AlternativeServiceVector& altsvc_vector),
      (override));
  MOCK_METHOD(void, OnPriority,
              (SpdyStreamId stream_id, SpdyStreamId parent_stream_id,
               int weight, bool exclusive),
              (override));
  MOCK_METHOD(void, OnPriorityUpdate,
              (SpdyStreamId prioritized_stream_id,
               absl::string_view priority_field_value),
              (override));
  MOCK_METHOD(bool, OnUnknownFrame,
              (SpdyStreamId stream_id, uint8_t frame_type), (override));
  MOCK_METHOD(void, OnUnknownFrameStart,
              (SpdyStreamId stream_id, size_t length, uint8_t type,
               uint8_t flags),
              (override));
  MOCK_METHOD(void, OnUnknownFramePayload,
              (SpdyStreamId stream_id, absl::string_view payload), (override));
};
// One point in the test matrix: a QUIC version paired with a perspective
// (client or server). Streamed to logs for readable failure output.
struct TestParams {
  TestParams(const ParsedQuicVersion& version, Perspective perspective)
      : version(version), perspective(perspective) {
    QUIC_LOG(INFO) << "TestParams: " << *this;
  }
  TestParams(const TestParams& other)
      : version(other.version), perspective(other.perspective) {}
  friend std::ostream& operator<<(std::ostream& os, const TestParams& tp) {
    os << "{ version: " << ParsedQuicVersionToString(tp.version)
       << ", perspective: "
       << (tp.perspective == Perspective::IS_CLIENT ? "client" : "server")
       << "}";
    return os;
  }

  ParsedQuicVersion version;
  Perspective perspective;
};
// Used by gtest's parameterized-test machinery to build the per-instance
// suffix of each test name, e.g. "Q046_client".
std::string PrintToString(const TestParams& tp) {
  const char* role =
      tp.perspective == Perspective::IS_CLIENT ? "client" : "server";
  return absl::StrCat(ParsedQuicVersionToString(tp.version), "_", role);
}
// Builds the full test matrix: every supported QUIC version that still
// uses the HTTP/2-style headers stream (i.e. not HTTP/3), crossed with
// both server and client perspectives.
std::vector<TestParams> GetTestParams() {
  std::vector<TestParams> params;
  for (const ParsedQuicVersion& version : AllSupportedVersions()) {
    if (VersionUsesHttp3(version.transport_version)) {
      // HTTP/3 versions have no headers stream; skip them.
      continue;
    }
    params.emplace_back(version, Perspective::IS_SERVER);
    params.emplace_back(version, Perspective::IS_CLIENT);
  }
  return params;
}
// Parameterized fixture for QuicHeadersStream tests. It stands up a mock
// QUIC connection and QuicSpdySession for the parameterized (version,
// perspective) pair, grabs the session's headers stream, and wires up a
// SpdyFramer (for serializing frames to feed in) and an
// Http2DecoderAdapter + MockVisitor (for inspecting frames written out).
class QuicHeadersStreamTest : public QuicTestWithParam<TestParams> {
 public:
  QuicHeadersStreamTest()
      : connection_(new StrictMock<MockQuicConnection>(
            &helper_, &alarm_factory_, perspective(), GetVersion())),
        session_(connection_),
        body_("hello world"),
        stream_frame_(
            QuicUtils::GetHeadersStreamId(connection_->transport_version()),
            /*fin=*/false,
            /*offset=*/0, ""),
        next_promised_stream_id_(2) {
    QuicSpdySessionPeer::SetMaxInboundHeaderListSize(&session_, 256 * 1024);
    EXPECT_CALL(session_, OnCongestionWindowChange(_)).Times(AnyNumber());
    session_.Initialize();
    connection_->SetEncrypter(
        quic::ENCRYPTION_FORWARD_SECURE,
        std::make_unique<quic::NullEncrypter>(connection_->perspective()));
    headers_stream_ = QuicSpdySessionPeer::GetHeadersStream(&session_);
    // Default response-style header block used by most tests.
    headers_[":status"] = "200 Ok";
    headers_["content-length"] = "11";
    framer_ = std::unique_ptr<SpdyFramer>(
        new SpdyFramer(SpdyFramer::ENABLE_COMPRESSION));
    deframer_ = std::unique_ptr<http2::Http2DecoderAdapter>(
        new http2::Http2DecoderAdapter());
    deframer_->set_visitor(&visitor_);
    EXPECT_EQ(transport_version(), session_.transport_version());
    EXPECT_TRUE(headers_stream_ != nullptr);
    connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
    // Cache the first three client-initiated bidirectional stream ids and
    // the version-specific id spacing for use in test loops.
    client_id_1_ = GetNthClientInitiatedBidirectionalStreamId(
        connection_->transport_version(), 0);
    client_id_2_ = GetNthClientInitiatedBidirectionalStreamId(
        connection_->transport_version(), 1);
    client_id_3_ = GetNthClientInitiatedBidirectionalStreamId(
        connection_->transport_version(), 2);
    next_stream_id_ =
        QuicUtils::StreamIdDelta(connection_->transport_version());
  }

  QuicStreamId GetNthClientInitiatedId(int n) {
    return GetNthClientInitiatedBidirectionalStreamId(
        connection_->transport_version(), n);
  }

  // WritevData action: re-serializes what the stream wants to send and
  // appends it to saved_data_ so tests can deframe and inspect it.
  QuicConsumedData SaveIov(size_t write_length) {
    char* buf = new char[write_length];
    QuicDataWriter writer(write_length, buf, quiche::NETWORK_BYTE_ORDER);
    headers_stream_->WriteStreamData(headers_stream_->stream_bytes_written(),
                                     write_length, &writer);
    saved_data_.append(buf, write_length);
    delete[] buf;
    return QuicConsumedData(write_length, false);
  }

  void SavePayload(const char* data, size_t len) {
    saved_payloads_.append(data, len);
  }

  bool SaveHeaderData(const char* data, int len) {
    saved_header_data_.append(data, len);
    return true;
  }

  void SaveHeaderDataStringPiece(absl::string_view data) {
    saved_header_data_.append(data.data(), data.length());
  }

  void SavePromiseHeaderList(QuicStreamId /*stream_id*/,
                             QuicStreamId /*promised_stream_id*/, size_t size,
                             const QuicHeaderList& header_list) {
    SaveToHandler(size, header_list);
  }

  void SaveHeaderList(QuicStreamId /*stream_id*/, bool /*fin*/, size_t size,
                      const QuicHeaderList& header_list) {
    SaveToHandler(size, header_list);
  }

  // Replays a received header list into a fresh RecordingHeadersHandler so
  // CheckHeaders() can compare it against headers_.
  void SaveToHandler(size_t size, const QuicHeaderList& header_list) {
    headers_handler_ = std::make_unique<RecordingHeadersHandler>();
    headers_handler_->OnHeaderBlockStart();
    for (const auto& p : header_list) {
      headers_handler_->OnHeader(p.first, p.second);
    }
    headers_handler_->OnHeaderBlockEnd(size, size);
  }

  void WriteAndExpectRequestHeaders(QuicStreamId stream_id, bool fin,
                                    SpdyPriority priority) {
    WriteHeadersAndCheckData(stream_id, fin, priority, /*is_request=*/true);
  }

  void WriteAndExpectResponseHeaders(QuicStreamId stream_id, bool fin) {
    WriteHeadersAndCheckData(stream_id, fin, 0, /*is_request=*/false);
  }

  // Writes headers_ on the headers stream, captures the serialized bytes,
  // deframes them, and verifies the resulting HEADERS frame and decoded
  // header block. Request headers additionally carry priority.
  void WriteHeadersAndCheckData(QuicStreamId stream_id, bool fin,
                                SpdyPriority priority, bool is_request) {
    EXPECT_CALL(session_, WritevData(QuicUtils::GetHeadersStreamId(
                                         connection_->transport_version()),
                                     _, _, NO_FIN, _, _))
        .WillOnce(WithArgs<1>(Invoke(this, &QuicHeadersStreamTest::SaveIov)));
    QuicSpdySessionPeer::WriteHeadersOnHeadersStream(
        &session_, stream_id, headers_.Clone(), fin,
        spdy::SpdyStreamPrecedence(priority), nullptr);
    if (is_request) {
      EXPECT_CALL(
          visitor_,
          OnHeaders(stream_id, saved_data_.length() - spdy::kFrameHeaderSize,
                    kHasPriority, Spdy3PriorityToHttp2Weight(priority),
                    /*parent_stream_id=*/0,
                    /*exclusive=*/false, fin, kFrameComplete));
    } else {
      EXPECT_CALL(
          visitor_,
          OnHeaders(stream_id, saved_data_.length() - spdy::kFrameHeaderSize,
                    !kHasPriority,
                    /*weight=*/0,
                    /*parent_stream_id=*/0,
                    /*exclusive=*/false, fin, kFrameComplete));
    }
    headers_handler_ = std::make_unique<RecordingHeadersHandler>();
    EXPECT_CALL(visitor_, OnHeaderFrameStart(stream_id))
        .WillOnce(Return(headers_handler_.get()));
    EXPECT_CALL(visitor_, OnHeaderFrameEnd(stream_id)).Times(1);
    if (fin) {
      EXPECT_CALL(visitor_, OnStreamEnd(stream_id));
    }
    deframer_->ProcessInput(saved_data_.data(), saved_data_.length());
    EXPECT_FALSE(deframer_->HasError())
        << http2::Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
    CheckHeaders();
    saved_data_.clear();
  }

  // Asserts the most recently decoded header block equals headers_.
  void CheckHeaders() {
    ASSERT_TRUE(headers_handler_);
    EXPECT_EQ(headers_, headers_handler_->decoded_block());
    headers_handler_.reset();
  }

  Perspective perspective() const { return GetParam().perspective; }

  QuicTransportVersion transport_version() const {
    return GetParam().version.transport_version;
  }

  ParsedQuicVersionVector GetVersion() {
    ParsedQuicVersionVector versions;
    versions.push_back(GetParam().version);
    return versions;
  }

  void TearDownLocalConnectionState() {
    QuicConnectionPeer::TearDownLocalConnectionState(connection_);
  }

  QuicStreamId NextPromisedStreamId() {
    return next_promised_stream_id_ += next_stream_id_;
  }

  static constexpr bool kFrameComplete = true;
  static constexpr bool kHasPriority = true;

  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  StrictMock<MockQuicConnection>* connection_;
  StrictMock<MockQuicSpdySession> session_;
  QuicHeadersStream* headers_stream_;
  Http2HeaderBlock headers_;
  std::unique_ptr<RecordingHeadersHandler> headers_handler_;
  std::string body_;
  // Capture buffers filled by the Save* helper callbacks above.
  std::string saved_data_;
  std::string saved_header_data_;
  std::string saved_payloads_;
  std::unique_ptr<SpdyFramer> framer_;
  std::unique_ptr<http2::Http2DecoderAdapter> deframer_;
  StrictMock<MockVisitor> visitor_;
  // Reusable frame; tests set its data and advance its offset as they go.
  QuicStreamFrame stream_frame_;
  QuicStreamId next_promised_stream_id_;
  QuicStreamId client_id_1_;
  QuicStreamId client_id_2_;
  QuicStreamId client_id_3_;
  QuicStreamId next_stream_id_;
};
// Instantiate the suite over every (version, perspective) combination.
INSTANTIATE_TEST_SUITE_P(Tests, QuicHeadersStreamTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// The headers stream must live on the reserved, version-specific id.
TEST_P(QuicHeadersStreamTest, StreamId) {
  EXPECT_EQ(QuicUtils::GetHeadersStreamId(connection_->transport_version()),
            headers_stream_->id());
}
// Headers written on several streams, with and without FIN, round-trip
// through serialization and deframing intact. Clients additionally
// exercise each priority level.
TEST_P(QuicHeadersStreamTest, WriteHeaders) {
  for (QuicStreamId stream_id = client_id_1_; stream_id < client_id_3_;
       stream_id += next_stream_id_) {
    for (bool fin : {false, true}) {
      if (perspective() == Perspective::IS_SERVER) {
        WriteAndExpectResponseHeaders(stream_id, fin);
      } else {
        for (SpdyPriority priority = 0; priority < 7; ++priority) {
          // Pass the loop's priority (previously hard-coded to 0, which
          // made all seven iterations identical and left priorities 1-6
          // untested).
          WriteAndExpectRequestHeaders(stream_id, fin, priority);
        }
      }
    }
  }
}
// Serialized HEADERS frames fed into the headers stream are deframed and
// surfaced to the session via OnStreamHeaderList, for each stream id and
// fin combination; client-originated frames also carry priority.
TEST_P(QuicHeadersStreamTest, ProcessRawData) {
  for (QuicStreamId stream_id = client_id_1_; stream_id < client_id_3_;
       stream_id += next_stream_id_) {
    for (bool fin : {false, true}) {
      for (SpdyPriority priority = 0; priority < 7; ++priority) {
        SpdySerializedFrame frame;
        if (perspective() == Perspective::IS_SERVER) {
          // A server receives client HEADERS, which include priority.
          SpdyHeadersIR headers_frame(stream_id, headers_.Clone());
          headers_frame.set_fin(fin);
          headers_frame.set_has_priority(true);
          headers_frame.set_weight(Spdy3PriorityToHttp2Weight(0));
          frame = framer_->SerializeFrame(headers_frame);
          EXPECT_CALL(session_, OnStreamHeadersPriority(
                                    stream_id, spdy::SpdyStreamPrecedence(0)));
        } else {
          SpdyHeadersIR headers_frame(stream_id, headers_.Clone());
          headers_frame.set_fin(fin);
          frame = framer_->SerializeFrame(headers_frame);
        }
        EXPECT_CALL(session_,
                    OnStreamHeaderList(stream_id, fin, frame.size(), _))
            .WillOnce(Invoke(this, &QuicHeadersStreamTest::SaveHeaderList));
        stream_frame_.data_buffer = frame.data();
        stream_frame_.data_length = frame.size();
        headers_stream_->OnStreamFrame(stream_frame_);
        // Keep the stream offset contiguous for the next frame.
        stream_frame_.offset += frame.size();
        CheckHeaders();
      }
    }
  }
}
// PUSH_PROMISE is unsupported: a server receiving one closes the
// connection, while a client resets the promised stream.
TEST_P(QuicHeadersStreamTest, ProcessPushPromise) {
  for (QuicStreamId stream_id = client_id_1_; stream_id < client_id_3_;
       stream_id += next_stream_id_) {
    QuicStreamId promised_stream_id = NextPromisedStreamId();
    SpdyPushPromiseIR push_promise(stream_id, promised_stream_id,
                                   headers_.Clone());
    SpdySerializedFrame frame(framer_->SerializeFrame(push_promise));
    if (perspective() == Perspective::IS_SERVER) {
      EXPECT_CALL(*connection_,
                  CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                                  "PUSH_PROMISE not supported.", _))
          .WillRepeatedly(InvokeWithoutArgs(
              this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
    } else {
      EXPECT_CALL(session_, MaybeSendRstStreamFrame(promised_stream_id, _, _));
    }
    stream_frame_.data_buffer = frame.data();
    stream_frame_.data_length = frame.size();
    headers_stream_->OnStreamFrame(stream_frame_);
    stream_frame_.offset += frame.size();
  }
}
// PRIORITY frames are accepted by servers (forwarded to the session) but
// are a connection error when received by a client.
TEST_P(QuicHeadersStreamTest, ProcessPriorityFrame) {
  QuicStreamId parent_stream_id = 0;
  for (SpdyPriority priority = 0; priority < 7; ++priority) {
    for (QuicStreamId stream_id = client_id_1_; stream_id < client_id_3_;
         stream_id += next_stream_id_) {
      int weight = Spdy3PriorityToHttp2Weight(priority);
      SpdyPriorityIR priority_frame(stream_id, parent_stream_id, weight, true);
      SpdySerializedFrame frame(framer_->SerializeFrame(priority_frame));
      // Chain each frame's parent to the previous stream id.
      parent_stream_id = stream_id;
      if (perspective() == Perspective::IS_CLIENT) {
        EXPECT_CALL(*connection_,
                    CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                                    "Server must not send PRIORITY frames.", _))
            .WillRepeatedly(InvokeWithoutArgs(
                this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
      } else {
        EXPECT_CALL(
            session_,
            OnPriorityFrame(stream_id, spdy::SpdyStreamPrecedence(priority)))
            .Times(1);
      }
      stream_frame_.data_buffer = frame.data();
      stream_frame_.data_length = frame.size();
      headers_stream_->OnStreamFrame(stream_frame_);
      stream_frame_.offset += frame.size();
    }
  }
}
// A client receiving SETTINGS_ENABLE_PUSH (an unsupported HTTP/2 setting
// in this transport) closes the connection.
TEST_P(QuicHeadersStreamTest, ProcessPushPromiseDisabledSetting) {
  if (perspective() != Perspective::IS_CLIENT) {
    return;
  }
  session_.OnConfigNegotiated();
  SpdySettingsIR data;
  data.AddSetting(SETTINGS_ENABLE_PUSH, 0);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  // "2" is the numeric id of SETTINGS_ENABLE_PUSH.
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                      "Unsupported field of HTTP/2 SETTINGS frame: 2", _));
  headers_stream_->OnStreamFrame(stream_frame_);
}
// Same as ProcessRawData, but with a header block large enough (3 x 8KiB
// values) to span multiple frames/continuations internally.
TEST_P(QuicHeadersStreamTest, ProcessLargeRawData) {
  headers_["key0"] = std::string(1 << 13, '.');
  headers_["key1"] = std::string(1 << 13, '.');
  headers_["key2"] = std::string(1 << 13, '.');
  for (QuicStreamId stream_id = client_id_1_; stream_id < client_id_3_;
       stream_id += next_stream_id_) {
    for (bool fin : {false, true}) {
      for (SpdyPriority priority = 0; priority < 7; ++priority) {
        SpdySerializedFrame frame;
        if (perspective() == Perspective::IS_SERVER) {
          SpdyHeadersIR headers_frame(stream_id, headers_.Clone());
          headers_frame.set_fin(fin);
          headers_frame.set_has_priority(true);
          headers_frame.set_weight(Spdy3PriorityToHttp2Weight(0));
          frame = framer_->SerializeFrame(headers_frame);
          EXPECT_CALL(session_, OnStreamHeadersPriority(
                                    stream_id, spdy::SpdyStreamPrecedence(0)));
        } else {
          SpdyHeadersIR headers_frame(stream_id, headers_.Clone());
          headers_frame.set_fin(fin);
          frame = framer_->SerializeFrame(headers_frame);
        }
        EXPECT_CALL(session_,
                    OnStreamHeaderList(stream_id, fin, frame.size(), _))
            .WillOnce(Invoke(this, &QuicHeadersStreamTest::SaveHeaderList));
        stream_frame_.data_buffer = frame.data();
        stream_frame_.data_length = frame.size();
        headers_stream_->OnStreamFrame(stream_frame_);
        stream_frame_.offset += frame.size();
        CheckHeaders();
      }
    }
  }
}
// Garbage bytes on the headers stream trigger a connection close with
// QUIC_INVALID_HEADERS_STREAM_DATA.
TEST_P(QuicHeadersStreamTest, ProcessBadData) {
  const char kBadData[] = "blah blah blah";
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA, _, _))
      .Times(::testing::AnyNumber());
  stream_frame_.data_buffer = kBadData;
  stream_frame_.data_length = strlen(kBadData);
  headers_stream_->OnStreamFrame(stream_frame_);
}
// HTTP/2 DATA frames are not allowed on the headers stream.
TEST_P(QuicHeadersStreamTest, ProcessSpdyDataFrame) {
  SpdyDataIR data(/*stream_id=*/2, "ping");
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                                            "SPDY DATA frame received.", _))
      .WillOnce(InvokeWithoutArgs(
          this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// HTTP/2 RST_STREAM frames are not allowed on the headers stream.
TEST_P(QuicHeadersStreamTest, ProcessSpdyRstStreamFrame) {
  SpdyRstStreamIR data(/*stream_id=*/2, ERROR_CODE_PROTOCOL_ERROR);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                              "SPDY RST_STREAM frame received.", _))
      .WillOnce(InvokeWithoutArgs(
          this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// The supported SETTINGS fields (header table size, max header list size)
// are applied: the encoder adopts the peer's advertised table size.
TEST_P(QuicHeadersStreamTest, RespectHttp2SettingsFrameSupportedFields) {
  const uint32_t kTestHeaderTableSize = 1000;
  SpdySettingsIR data;
  data.AddSetting(SETTINGS_HEADER_TABLE_SIZE, kTestHeaderTableSize);
  data.AddSetting(spdy::SETTINGS_MAX_HEADER_LIST_SIZE, 2000);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
  EXPECT_EQ(kTestHeaderTableSize, QuicSpdySessionPeer::GetSpdyFramer(&session_)
                                      ->header_encoder_table_size());
}
// An absurdly large advertised header table size is clamped (to 16KiB)
// rather than adopted verbatim by the encoder.
TEST_P(QuicHeadersStreamTest, LimitEncoderDynamicTableSize) {
  const uint32_t kVeryLargeTableSizeLimit = 1024 * 1024 * 1024;
  SpdySettingsIR data;
  data.AddSetting(SETTINGS_HEADER_TABLE_SIZE, kVeryLargeTableSizeLimit);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
  EXPECT_EQ(16384u, QuicSpdySessionPeer::GetSpdyFramer(&session_)
                        ->header_encoder_table_size());
}
// Each unsupported SETTINGS field triggers its own connection-close with
// a message naming the offending field id. SETTINGS_ENABLE_PUSH is only
// an error on the client side.
TEST_P(QuicHeadersStreamTest, RespectHttp2SettingsFrameUnsupportedFields) {
  SpdySettingsIR data;
  data.AddSetting(SETTINGS_MAX_CONCURRENT_STREAMS, 100);
  data.AddSetting(SETTINGS_INITIAL_WINDOW_SIZE, 100);
  data.AddSetting(SETTINGS_ENABLE_PUSH, 1);
  data.AddSetting(SETTINGS_MAX_FRAME_SIZE, 1250);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_HEADERS_STREAM_DATA,
                  absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ",
                               SETTINGS_MAX_CONCURRENT_STREAMS),
                  _));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_HEADERS_STREAM_DATA,
                  absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ",
                               SETTINGS_INITIAL_WINDOW_SIZE),
                  _));
  if (session_.perspective() == Perspective::IS_CLIENT) {
    EXPECT_CALL(*connection_,
                CloseConnection(
                    QUIC_INVALID_HEADERS_STREAM_DATA,
                    absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ",
                                 SETTINGS_ENABLE_PUSH),
                    _));
  }
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_HEADERS_STREAM_DATA,
                  absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ",
                               SETTINGS_MAX_FRAME_SIZE),
                  _));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// HTTP/2 PING frames are not allowed on the headers stream.
TEST_P(QuicHeadersStreamTest, ProcessSpdyPingFrame) {
  SpdyPingIR data(1);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                                            "SPDY PING frame received.", _))
      .WillOnce(InvokeWithoutArgs(
          this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// HTTP/2 GOAWAY frames are not allowed on the headers stream.
TEST_P(QuicHeadersStreamTest, ProcessSpdyGoAwayFrame) {
  SpdyGoAwayIR data(/*last_good_stream_id=*/1, ERROR_CODE_PROTOCOL_ERROR,
                    "go away");
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                                            "SPDY GOAWAY frame received.", _))
      .WillOnce(InvokeWithoutArgs(
          this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// HTTP/2 WINDOW_UPDATE frames are not allowed on the headers stream.
TEST_P(QuicHeadersStreamTest, ProcessSpdyWindowUpdateFrame) {
  SpdyWindowUpdateIR data(/*stream_id=*/1, 1);
  SpdySerializedFrame frame(framer_->SerializeFrame(data));
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_INVALID_HEADERS_STREAM_DATA,
                              "SPDY WINDOW_UPDATE frame received.", _))
      .WillOnce(InvokeWithoutArgs(
          this, &QuicHeadersStreamTest::TearDownLocalConnectionState));
  stream_frame_.data_buffer = frame.data();
  stream_frame_.data_length = frame.size();
  headers_stream_->OnStreamFrame(stream_frame_);
}
// The headers stream is exempt from connection-level flow control.
TEST_P(QuicHeadersStreamTest, NoConnectionLevelFlowControl) {
  EXPECT_FALSE(QuicStreamPeer::StreamContributesToConnectionFlowControl(
      headers_stream_));
}
// Retransmission and ack notifications are routed to the ack listener of
// the specific 7-byte header write they cover. Six writes land at stream
// offsets 0, 7, 14, 21, 28 and 35; listeners map to offsets as:
// listener1 -> [0,14), listener2 -> [14,21) and [28,35),
// listener3 -> [21,28) and [35,42).
TEST_P(QuicHeadersStreamTest, AckSentData) {
  EXPECT_CALL(session_, WritevData(QuicUtils::GetHeadersStreamId(
                                       connection_->transport_version()),
                                   _, _, NO_FIN, _, _))
      .WillRepeatedly(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  InSequence s;
  quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener1(
      new MockAckListener());
  quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener2(
      new MockAckListener());
  quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener3(
      new MockAckListener());
  // Each write is exactly 7 bytes long.
  headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
  headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
  headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
  headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
  headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
  headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
  // Retransmit [21,28) and [28,35): listener3 then listener2.
  EXPECT_CALL(*ack_listener3, OnPacketRetransmitted(7)).Times(1);
  EXPECT_CALL(*ack_listener2, OnPacketRetransmitted(7)).Times(1);
  headers_stream_->OnStreamFrameRetransmitted(21, 7, false);
  headers_stream_->OnStreamFrameRetransmitted(28, 7, false);
  // Ack the same two ranges.
  QuicByteCount newly_acked_length = 0;
  EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
  EXPECT_CALL(*ack_listener2, OnPacketAcked(7, _));
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      21, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      28, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
  // Ack [35,42): listener3's second write.
  EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      35, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
  // Ack [0,7) and [7,14): both belong to listener1.
  EXPECT_CALL(*ack_listener1, OnPacketAcked(7, _));
  EXPECT_CALL(*ack_listener1, OnPacketAcked(7, _));
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      0, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      7, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
  // Ack [14,24): [14,21) is new for listener2; [21,24) was already acked,
  // so only 7 bytes count as newly acked.
  EXPECT_CALL(*ack_listener2, OnPacketAcked(7, _));
  EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
      14, 10, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
      &newly_acked_length));
  EXPECT_EQ(7u, newly_acked_length);
}
TEST_P(QuicHeadersStreamTest, FrameContainsMultipleHeaders) {
EXPECT_CALL(session_, WritevData(QuicUtils::GetHeadersStreamId(
connection_->transport_version()),
_, _, NO_FIN, _, _))
.WillRepeatedly(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
InSequence s;
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener1(
new MockAckListener());
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener2(
new MockAckListener());
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener3(
new MockAckListener());
headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
EXPECT_CALL(*ack_listener1, OnPacketRetransmitted(14));
EXPECT_CALL(*ack_listener2, OnPacketRetransmitted(3));
headers_stream_->OnStreamFrameRetransmitted(0, 17, false);
QuicByteCount newly_acked_length = 0;
EXPECT_CALL(*ack_listener2, OnPacketAcked(4, _));
EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
EXPECT_CALL(*ack_listener2, OnPacketAcked(2, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
17, 13, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(13u, newly_acked_length);
EXPECT_CALL(*ack_listener2, OnPacketAcked(5, _));
EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
30, 12, | void QuicHeadersStream::OnStreamFrameRetransmitted(QuicStreamOffset offset,
QuicByteCount data_length,
bool ) {
QuicStream::OnStreamFrameRetransmitted(offset, data_length, false);
for (CompressedHeaderInfo& header : unacked_headers_) {
if (offset < header.headers_stream_offset) {
break;
}
if (offset >= header.headers_stream_offset + header.full_length) {
continue;
}
QuicByteCount header_offset = offset - header.headers_stream_offset;
QuicByteCount retransmitted_length =
std::min(data_length, header.full_length - header_offset);
if (header.ack_listener != nullptr && retransmitted_length > 0) {
header.ack_listener->OnPacketRetransmitted(retransmitted_length);
}
offset += retransmitted_length;
data_length -= retransmitted_length;
}
} | TEST_P(QuicHeadersStreamTest, AckSentData) {
EXPECT_CALL(session_, WritevData(QuicUtils::GetHeadersStreamId(
connection_->transport_version()),
_, _, NO_FIN, _, _))
.WillRepeatedly(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
InSequence s;
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener1(
new MockAckListener());
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener2(
new MockAckListener());
quiche::QuicheReferenceCountedPointer<MockAckListener> ack_listener3(
new MockAckListener());
headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
headers_stream_->WriteOrBufferData("Header5", false, ack_listener1);
headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
headers_stream_->WriteOrBufferData("Header7", false, ack_listener2);
headers_stream_->WriteOrBufferData("Header9", false, ack_listener3);
EXPECT_CALL(*ack_listener3, OnPacketRetransmitted(7)).Times(1);
EXPECT_CALL(*ack_listener2, OnPacketRetransmitted(7)).Times(1);
headers_stream_->OnStreamFrameRetransmitted(21, 7, false);
headers_stream_->OnStreamFrameRetransmitted(28, 7, false);
QuicByteCount newly_acked_length = 0;
EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
EXPECT_CALL(*ack_listener2, OnPacketAcked(7, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
21, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
28, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
EXPECT_CALL(*ack_listener3, OnPacketAcked(7, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
35, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
EXPECT_CALL(*ack_listener1, OnPacketAcked(7, _));
EXPECT_CALL(*ack_listener1, OnPacketAcked(7, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
0, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
7, 7, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
EXPECT_CALL(*ack_listener2, OnPacketAcked(7, _));
EXPECT_TRUE(headers_stream_->OnStreamFrameAcked(
14, 10, false, QuicTime::Delta::Zero(), QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(7u, newly_acked_length);
} |
#include "xla/client/lib/constants.h"
#include <limits>
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
// Returns a scalar constant with value zero of the given element type,
// added to `builder`'s computation.
XlaOp Zero(XlaBuilder* builder, PrimitiveType type) {
  const Literal zero_literal = LiteralUtil::Zero(type);
  return ConstantLiteral(builder, zero_literal);
}
// Returns an array of zeros with the element type and dimensions of `shape`.
XlaOp Zeros(XlaBuilder* builder, const Shape& shape) {
  XlaOp scalar_zero = Zero(builder, shape.element_type());
  return Broadcast(scalar_zero, shape.dimensions());
}
// Returns a zero-filled array with the same shape (type and dims) as
// `prototype`. Errors from GetShape are reported on the prototype's builder.
XlaOp ZerosLike(XlaOp prototype) {
  XlaBuilder* b = prototype.builder();
  return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape proto_shape, b->GetShape(prototype));
    return Zeros(b, proto_shape);
  });
}
// Returns a scalar constant with value one of the given element type,
// added to `builder`'s computation.
XlaOp One(XlaBuilder* builder, PrimitiveType type) {
  const Literal one_literal = LiteralUtil::One(type);
  return ConstantLiteral(builder, one_literal);
}
// Returns a scalar holding std::numeric_limits<T>::epsilon() for the native
// type T corresponding to `type`. Only floating-point types are valid; any
// other type reports an InvalidArgument error on the builder.
XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        // Compile-time dispatch: this branch is instantiated only for
        // floating-point primitive types.
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::epsilon());
        }
        // Non-floating-point types have no epsilon; surface an error.
        return builder->ReportError(InvalidArgument(
            "Invalid type for Epsilon (%s).", PrimitiveType_Name(type)));
      },
      type);
}
// Returns the minimum representable value for `type`; per the tests in this
// file, that is -infinity for floating-point types.
XlaOp MinValue(XlaBuilder* builder, PrimitiveType type) {
  const Literal min_literal = LiteralUtil::MinValue(type);
  return ConstantLiteral(builder, min_literal);
}
// Returns the minimum *finite* value for `type`: numeric_limits::lowest()
// for floating-point types (i.e. -max, not -infinity), and falls back to
// MinValue for all other types (where min is already finite).
XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::lowest());
        }
        // Integral types: min value is finite already.
        return MinValue(builder, type);
      },
      type);
}
// Returns the smallest positive *normal* value (numeric_limits::min()) for a
// floating-point `type`; any other type reports InvalidArgument.
XlaOp MinPositiveNormalValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          // numeric_limits::min() is the smallest normal (not denormal) value
          // for floating-point types.
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::min());
        }
        return builder->ReportError(
            InvalidArgument("Invalid type for MinPositiveNormalValue (%s).",
                            PrimitiveType_Name(type)));
      },
      type);
}
// Returns the maximum representable value for `type`; per the tests in this
// file, that is +infinity for floating-point types.
XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type) {
  const Literal max_literal = LiteralUtil::MaxValue(type);
  return ConstantLiteral(builder, max_literal);
}
// Returns the maximum *finite* value for `type`: numeric_limits::max() for
// floating-point types (not +infinity), falling back to MaxValue otherwise.
XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::max());
        }
        // Integral types: max value is finite already.
        return MaxValue(builder, type);
      },
      type);
}
// Returns a quiet-NaN scalar for a floating-point `type`; any other type
// reports InvalidArgument on the builder.
XlaOp NanValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::quiet_NaN());
        }
        // NaN is undefined for non-floating-point types.
        return builder->ReportError(InvalidArgument(
            "Invalid type for NanValue (%s).", PrimitiveType_Name(type)));
      },
      type);
}
} | #include "xla/client/lib/constants.h"
#include <limits>
#include "xla/client/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ConstantsTest = ClientLibraryTestBase;
using ::testing::HasSubstr;
// ConstantR0WithType builds a scalar of the requested PrimitiveType from a
// C++ literal value.
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32) {
  XlaBuilder builder(TestName());
  ConstantR0WithType(&builder, xla::S32, 4);
  ComputeAndCompareR0<int32_t>(&builder, 4, {});
}
// A fractional value cannot be losslessly cast to S32; building the
// computation must fail with an "Invalid cast" error.
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32DoesNotAcceptFloats) {
  XlaBuilder builder(TestName());
  ConstantR0WithType(&builder, xla::S32, 4.5);
  auto statusor = builder.Build();
  ASSERT_FALSE(statusor.ok());
  EXPECT_THAT(statusor.status().message(), HasSubstr("Invalid cast"));
}
// F32 constants accept both integral and fractional values.
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeF32) {
  XlaBuilder builder(TestName());
  ConstantR0WithType(&builder, xla::F32, -7);
  ComputeAndCompareR0<float>(&builder, -7, {});
  ConstantR0WithType(&builder, xla::F32, 0.5);
  ComputeAndCompareR0<float>(&builder, 0.5, {});
}
// ScalarLike copies the element type of its prototype operand.
XLA_TEST_F(ConstantsTest, ScalarLikeS32) {
  XlaBuilder builder(TestName());
  ScalarLike(ConstantR0<int32_t>(&builder, 42), -3);
  ComputeAndCompareR0<int32_t>(&builder, -3, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeF32) {
  XlaBuilder builder(TestName());
  ScalarLike(ConstantR0<float>(&builder, 42.75), -3.2);
  ComputeAndCompareR0<float>(&builder, -3.2, {});
}
// Zero/Zeros/ZerosLike/One produce the expected constant values.
XLA_TEST_F(ConstantsTest, ZeroS32) {
  XlaBuilder builder(TestName());
  Zero(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, 0, {});
}
XLA_TEST_F(ConstantsTest, ZeroF32) {
  XlaBuilder builder(TestName());
  Zero(&builder, F32);
  ComputeAndCompareR0<float>(&builder, 0.0, {});
}
XLA_TEST_F(ConstantsTest, ZerosS32) {
  XlaBuilder builder(TestName());
  Zeros(&builder, ShapeUtil::MakeShape(S32, {2, 2}));
  ComputeAndCompareR2<int32_t>(&builder, {{0, 0}, {0, 0}}, {});
}
XLA_TEST_F(ConstantsTest, ZerosLikeF32) {
  XlaBuilder builder(TestName());
  ZerosLike(ConstantR1<float>(&builder, {1., 2., 3.}));
  ComputeAndCompareR1<float>(&builder, {0., 0., 0.}, {});
}
XLA_TEST_F(ConstantsTest, OneS32) {
  XlaBuilder builder(TestName());
  One(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, 1, {});
}
XLA_TEST_F(ConstantsTest, OneF32) {
  XlaBuilder builder(TestName());
  One(&builder, F32);
  ComputeAndCompareR0<float>(&builder, 1., {});
}
// Epsilon matches std::numeric_limits for the native type.
XLA_TEST_F(ConstantsTest, EpsilonF32) {
  XlaBuilder builder(TestName());
  Epsilon(&builder, F32);
  ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::epsilon(),
                             {});
}
// For integral types, MinFiniteValue/MaxFiniteValue coincide with
// MinValue/MaxValue (integer limits are already finite).
XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
  XlaBuilder builder(TestName());
  MinFiniteValue(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
                               {});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueS32) {
  XlaBuilder builder(TestName());
  MaxFiniteValue(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
                               {});
}
// For floats, the finite extrema are +/-numeric_limits<float>::max() ...
XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
  XlaBuilder builder(TestName());
  MinFiniteValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueF32) {
  XlaBuilder builder(TestName());
  MaxFiniteValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MinValueS32) {
  XlaBuilder builder(TestName());
  MinValue(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
                               {});
}
XLA_TEST_F(ConstantsTest, MaxValueS32) {
  XlaBuilder builder(TestName());
  MaxValue(&builder, S32);
  ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
                               {});
}
// ... while the unconstrained extrema (MinValue/MaxValue) are infinities.
XLA_TEST_F(ConstantsTest, MinValueF32) {
  XlaBuilder builder(TestName());
  MinValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::infinity(),
                             {});
}
XLA_TEST_F(ConstantsTest, MaxValueF32) {
  XlaBuilder builder(TestName());
  MaxValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::infinity(),
                             {});
}
// NanValue produces a quiet NaN (comparison helper treats NaNs as equal).
XLA_TEST_F(ConstantsTest, NanValueF32) {
  XlaBuilder builder(TestName());
  NanValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::quiet_NaN(),
                             {});
}
}
} | XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::lowest());
}
return MinValue(builder, type);
},
type);
} | XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
// MinFiniteValue(F32) is -numeric_limits<float>::max(), not -infinity.
XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
  XlaBuilder builder(TestName());
  MinFiniteValue(&builder, F32);
  ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
}
#include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
// Out-of-line definitions for the ops' constexpr static data members.
// Required for ODR-used constexpr statics prior to C++17 inline variables.
constexpr const char* const SaveDatasetOp::kCompression;
constexpr const char* const SaveDatasetOp::kPath;
constexpr const char* const SaveDatasetOp::kShardFunc;
constexpr const char* const SaveDatasetOp::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetOp::kUseShardFunc;
constexpr const int SaveDatasetOp::kFileFormatVersion;
constexpr const char* const SaveDatasetV2Op::kInputDataset;
constexpr const char* const SaveDatasetV2Op::kPath;
constexpr const char* const SaveDatasetV2Op::kCompression;
constexpr const char* const SaveDatasetV2Op::kDatasetType;
constexpr const char* const SaveDatasetV2Op::kOutputTypes;
constexpr const char* const SaveDatasetV2Op::kOutputShapes;
constexpr const char* const SaveDatasetV2Op::kShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetV2Op::kUseShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncTarguments;
constexpr const int SaveDatasetV2Op::kFileFormatVersion;
// Reads the op's attributes (compression codec, shard function, and whether
// the shard function should be used) at kernel-construction time.
SaveDatasetOp::SaveDatasetOp(OpKernelConstruction* ctx)
    : HybridAsyncOpKernel(ctx, "tf_data_save_dataset") {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
                                               &func_metadata_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
}
// Eagerly iterates the input dataset and writes all elements to disk under a
// fresh run directory. Writes a non-finalized metadata file first so partial
// runs are detectable, then rewrites it as finalized with the element count.
Status SaveDatasetOp::DoCompute(OpKernelContext* ctx) {
  metrics::RecordTFDataFetchOp("SaveDatasetOp");
  DatasetBase* dataset;
  TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
  tstring path;
  TF_RETURN_IF_ERROR(ParseScalarArgument(ctx, kPath, &path));
  // Random run id namespaces this save under `path`.
  auto run_id = random::New64();
  auto run_dir = snapshot_util::RunDirectory(path, run_id);
  TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir));
  // Placeholder metadata: 0 elements, finalized=false.
  TF_RETURN_IF_ERROR(
      WriteMetadataFile(ctx->env(), path, run_id, dataset->output_dtypes(),
                        0, false));
  std::unique_ptr<CapturedFunction> captured_func;
  TF_RETURN_IF_ERROR(CapturedFunction::Create(
      ctx, func_metadata_, kShardFuncOtherArgs, &captured_func));
  uint64 num_elements = 0;
  TF_RETURN_IF_ERROR(WriteData(ctx, dataset, std::move(captured_func), run_dir,
                               &num_elements));
  // Final metadata: actual element count, finalized=true.
  TF_RETURN_IF_ERROR(WriteMetadataFile(ctx->env(), path, run_id,
                                       dataset->output_dtypes(), num_elements,
                                       true));
  return absl::OkStatus();
}
// Drains `dataset` and writes each element to the shard chosen by
// GetShardIndex, creating one AsyncWriter per shard lazily. Writer errors are
// reported asynchronously through the done-callback into `status` (guarded by
// `mu`) and returned after all writers have been EOF'd.
Status SaveDatasetOp::WriteData(OpKernelContext* ctx, DatasetBase* dataset,
                                std::unique_ptr<CapturedFunction> captured_func,
                                const std::string& run_dir,
                                uint64* num_elements) {
  IteratorContext::Params params(ctx);
  auto function_handle_cache =
      std::make_unique<FunctionHandleCache>(params.flr);
  params.function_handle_cache = function_handle_cache.get();
  ResourceMgr resource_mgr;
  params.resource_mgr = &resource_mgr;
  CancellationManager cancellation_manager(ctx->cancellation_manager());
  params.cancellation_manager = &cancellation_manager;
  IteratorContext iter_ctx(std::move(params));
  std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
  TF_RETURN_IF_ERROR(
      captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
  // NOTE(review): if FinalizeDataset hands back a new dataset object, its
  // refcount is never released here — confirm the ownership contract.
  DatasetBase* finalized_dataset;
  TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
  std::unique_ptr<IteratorBase> iterator;
  TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
      &iter_ctx, nullptr, "Save", &iterator));
  // `mu` guards `status`, which writer threads update via the callback below.
  mutex mu;
  Status status;
  absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
      writers;
  while (true) {
    // Honor cooperative cancellation between elements.
    if (ctx->cancellation_manager()->IsCancelled()) {
      return errors::Cancelled("Operation was cancelled");
    }
    std::vector<Tensor> element;
    bool end_of_input;
    TF_RETURN_IF_ERROR(iterator->GetNext(&iter_ctx, &element, &end_of_input));
    if (end_of_input) {
      break;
    }
    (*num_elements)++;
    int64_t shard_index = -1;
    TF_RETURN_IF_ERROR(GetShardIndex(
        &iter_ctx, instantiated_captured_func.get(), element, &shard_index));
    // Lazily create a writer thread the first time a shard index appears.
    if (writers.count(shard_index) == 0) {
      const auto snapshot_shard_directory =
          snapshot_util::ShardDirectory(run_dir, shard_index);
      auto writer_thread = std::make_unique<snapshot_util::AsyncWriter>(
          ctx->env(), shard_index, snapshot_shard_directory,
          0, compression_, kFileFormatVersion,
          finalized_dataset->output_dtypes(), [&mu, &status](Status s) {
            mutex_lock l(mu);
            status.Update(s);
          });
      writers.insert({shard_index, std::move(writer_thread)});
    }
    writers[shard_index]->Write(element);
  }
  // Flush and join all writers before surfacing any asynchronous error.
  for (auto& writer : writers) {
    writer.second->SignalEOF();
  }
  writers.clear();
  return status;
}
// Computes the shard index for `element`. With a user shard function, runs it
// and requires a scalar int64 result. Without one, advances *shard_index in a
// round-robin over the CPU budget.
// NOTE(review): callers pass a fresh shard_index each element (see WriteData,
// which reinitializes it to -1 per iteration), so the no-shard-func branch
// always yields the same shard — confirm whether round-robin state was meant
// to persist across elements.
Status SaveDatasetOp::GetShardIndex(IteratorContext* ctx,
                                    InstantiatedCapturedFunction* function,
                                    const std::vector<Tensor>& element,
                                    int64_t* shard_index) {
  if (!use_shard_func_) {
    *shard_index = (*shard_index + 1) % GetCpuBudget();
    return absl::OkStatus();
  }
  std::vector<Tensor> output_tensors;
  TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
      ctx, element, &output_tensors, nullptr));
  // The shard function's contract: exactly one scalar DT_INT64 output.
  if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
      output_tensors[0].NumElements() != 1) {
    return errors::InvalidArgument("`shard_func` must return a scalar int64.");
  }
  *shard_index = output_tensors[0].flat<int64_t>()(0);
  return absl::OkStatus();
}
// Serializes a SnapshotMetadataRecord describing this save run (run id,
// format version, output dtypes, element count, finalized flag) and writes
// it atomically under `path` via snapshot_util.
Status SaveDatasetOp::WriteMetadataFile(Env* env, const std::string& path,
                                        uint64 run_id,
                                        const DataTypeVector& output_dtypes,
                                        uint64 num_elements, bool finalized) {
  SnapshotMetadataRecord metadata;
  metadata.set_version(kFileFormatVersion);
  metadata.set_creation_timestamp(EnvTime::NowMicros());
  // run_id is stored as a decimal string in the proto.
  metadata.set_run_id(
      strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
  metadata.set_num_elements(num_elements);
  metadata.set_finalized(finalized);
  for (DataType dtype : output_dtypes) {
    metadata.add_dtype(dtype);
  }
  return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
// Dataset that, while being iterated, writes the elements produced by its
// input dataset to disk in snapshot format (sharded by `shard_func` when
// `use_shard_func` is set) and yields the elements through unchanged.
//
// Fixes relative to the previous revision:
//  * RestoreInternal passed a garbled token (`¤t_checkpoint_id`, a
//    mangled `&current_checkpoint_id`) to ReadScalar; restored the address-of
//    expression so the code compiles and restores the checkpoint id.
//  * Iterator::num_elements_ was never initialized but is incremented in
//    GetNextInternal; it now has an in-class initializer and a lock
//    annotation matching its actual use under mu_.
class SaveDatasetV2Op::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, const DatasetBase* input, const tstring& path,
          const std::string& compression,
          std::unique_ptr<CapturedFunction> shard_func, bool use_shard_func)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        path_(path),
        compression_(compression),
        shard_func_(std::move(shard_func)),
        use_shard_func_(use_shard_func) {
    input_->Ref();
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  // Output signature is identical to the input dataset's.
  const DataTypeVector& output_dtypes() const override {
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return input_->output_shapes();
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    return input_->Cardinality(options);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }

 protected:
  // Serializes this dataset node: inputs are (input_dataset, path), plus the
  // shard function's captured arguments; attrs carry compression and the
  // shard-function configuration.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* path_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(path_, &path_node));
    std::vector<Node*> shard_func_other_args;
    DataTypeVector shard_func_other_args_types;
    TF_RETURN_IF_ERROR(shard_func_->AddToGraph(ctx, b, &shard_func_other_args,
                                               &shard_func_other_args_types));
    AttrValue compression_attr;
    b->BuildAttrValue(compression_, &compression_attr);
    AttrValue shard_func_attr;
    b->BuildAttrValue(shard_func_->func(), &shard_func_attr);
    AttrValue use_shard_func_attr;
    b->BuildAttrValue(use_shard_func_, &use_shard_func_attr);
    AttrValue shard_func_arguments_types_attr;
    b->BuildAttrValue(shard_func_other_args_types,
                      &shard_func_arguments_types_attr);
    TF_RETURN_IF_ERROR(b->AddDataset(
        this,
        {std::make_pair(0, input_graph_node), std::make_pair(1, path_node)},
        {std::make_pair(2, shard_func_other_args)},
        {std::make_pair(kCompression, compression_attr),
         std::make_pair(kShardFunc, shard_func_attr),
         std::make_pair(kUseShardFunc, use_shard_func_attr),
         std::make_pair(kShardFuncTarguments, shard_func_arguments_types_attr)},
        output));
    return absl::OkStatus();
  }

 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    static constexpr const char* const kIteratorName = "Writer";
    static constexpr const char* const kRunId = "run_id";
    static constexpr const char* const kCurrentCheckpointId =
        "current_checkpoint_id";
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params),
          writers_closed_(false),
          run_id_(0),
          current_checkpoint_id_(0) {}
    ~Iterator() override {
      mutex_lock l(mu_);
      // Close writers permanently; no more elements will be produced.
      SignalEOF(true);
    }
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(
          dataset()->shard_func_->Instantiate(ctx, &instantiated_shard_func_));
      // On a fresh start (not a checkpoint restore), create the run directory
      // and a non-finalized metadata placeholder.
      if (!ctx->is_restoring()) {
        run_id_ = random::New64();
        run_dir_ = snapshot_util::RunDirectory(
            io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
        TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
        TF_RETURN_IF_ERROR(WriteMetadataFile(
            ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
            0, false));
      }
      return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
    }
    // Pulls the next element from the input, hands it to the appropriate
    // shard writer, and passes it through to the caller. On end of input,
    // finalizes the metadata file.
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      *end_of_sequence = false;
      snapshot_util::AsyncWriter* current_writer;
      {
        std::vector<Tensor> output_tensors;
        mutex_lock l(mu_);
        {
          // Surface any asynchronous writer failure before producing more.
          mutex_lock wsl(writer_status_mu_);
          if (!writer_status_.ok() || writers_closed_) {
            *end_of_sequence = true;
            return writer_status_;
          }
        }
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
        if (*end_of_sequence) {
          SignalEOF(true);
          {
            mutex_lock wsl(writer_status_mu_);
            TF_RETURN_IF_ERROR(writer_status_);
          }
          // NOTE(review): the finalized metadata records
          // dataset()->Cardinality() rather than num_elements_ — confirm this
          // is intended when cardinality is unknown.
          return WriteMetadataFile(
              ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
              dataset()->Cardinality(), true);
        }
        ++num_elements_;
        int64_t shard_index = 0;
        TF_RETURN_IF_ERROR(
            GetShardIndex(ctx, instantiated_shard_func_.get(), *out_tensors,
                          dataset()->use_shard_func_, &shard_index));
        // Lazily create one async writer per shard.
        if (writers_.count(shard_index) == 0) {
          auto snapshot_shard_directory =
              snapshot_util::ShardDirectory(run_dir_, shard_index);
          auto writer = std::make_unique<snapshot_util::AsyncWriter>(
              ctx->env(), shard_index, snapshot_shard_directory,
              current_checkpoint_id_, dataset()->compression_,
              kFileFormatVersion, dataset()->output_dtypes(), [this](Status s) {
                if (!s.ok()) {
                  mutex_lock l(writer_status_mu_);
                  writer_status_ = s;
                }
              });
          writers_.insert({shard_index, std::move(writer)});
        }
        current_writer = writers_[shard_index].get();
      }
      // Write outside mu_ so slow disk I/O does not block other accessors.
      current_writer->Write(*out_tensors);
      return absl::OkStatus();
    }

   protected:
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kRunId),
                                             static_cast<int64_t>(run_id_)));
      TF_RETURN_IF_ERROR(
          writer->WriteScalar(full_name(kCurrentCheckpointId),
                              static_cast<int64_t>(current_checkpoint_id_)));
      // Flush current writers; a restore will start a new checkpoint epoch.
      SignalEOF(false);
      writers_.clear();
      current_checkpoint_id_++;
      return SaveInput(ctx, writer, input_impl_);
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      int64_t run_id_signed;
      int64_t current_checkpoint_id;
      TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kRunId), &run_id_signed));
      TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointId),
                                            &current_checkpoint_id));
      run_id_ = static_cast<uint64>(run_id_signed);
      run_dir_ = snapshot_util::RunDirectory(
          io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
      current_checkpoint_id_ = static_cast<uint64>(current_checkpoint_id);
      if (ctx->is_restoring()) {
        TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
        TF_RETURN_IF_ERROR(WriteMetadataFile(
            ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
            0, false));
      }
      return RestoreInput(ctx, reader, input_impl_);
    }

   private:
    // Chooses the shard for `element`: the user shard function (scalar int64
    // contract) when enabled, else round-robin over the CPU budget.
    // NOTE(review): the caller resets shard_index to 0 per element, so the
    // round-robin branch always yields 1 — confirm intended behavior.
    Status GetShardIndex(IteratorContext* ctx,
                         InstantiatedCapturedFunction* function,
                         const std::vector<Tensor>& element,
                         bool use_shard_func, int64_t* shard_index)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (!use_shard_func) {
        *shard_index = (*shard_index + 1) % GetCpuBudget();
        return absl::OkStatus();
      }
      std::vector<Tensor> output_tensors;
      TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
          ctx, element, &output_tensors, nullptr));
      if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
          output_tensors[0].NumElements() != 1) {
        return errors::InvalidArgument(
            "`shard_func` must return a scalar int64.");
      }
      *shard_index = output_tensors[0].flat<int64_t>()(0);
      return absl::OkStatus();
    }
    // Writes the run's SnapshotMetadataRecord (run id, version, dtypes,
    // element count, finalized flag) under `path`.
    Status WriteMetadataFile(Env* env, const std::string& path, uint64 run_id,
                             const DataTypeVector& output_dtypes,
                             uint64 num_elements, bool finalized)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      SnapshotMetadataRecord metadata;
      metadata.set_creation_timestamp(EnvTime::NowMicros());
      metadata.set_run_id(
          strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
      metadata.set_version(kFileFormatVersion);
      for (const auto& output_dtype : output_dtypes) {
        metadata.add_dtype(output_dtype);
      }
      metadata.set_finalized(finalized);
      metadata.set_num_elements(num_elements);
      return snapshot_util::WriteMetadataFile(env, path, &metadata);
    }
    // Flushes all writers; `mark_closed` additionally makes the iterator
    // refuse further writes (used from the destructor / end of input).
    void SignalEOF(bool mark_closed) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (!writers_closed_) {
        for (auto& writer : writers_) {
          writer.second->SignalEOF();
        }
        writers_.clear();
        writers_closed_ = mark_closed;
      }
    }

    mutex mu_;
    mutex writer_status_mu_;
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    // Elements produced so far. NOTE(review): not persisted by
    // SaveInternal/RestoreInternal, so it restarts at 0 after a restore.
    int64_t num_elements_ TF_GUARDED_BY(mu_) = 0;
    absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
        writers_ TF_GUARDED_BY(mu_);
    Status writer_status_ TF_GUARDED_BY(writer_status_mu_);
    bool writers_closed_ TF_GUARDED_BY(mu_);
    uint64 run_id_ TF_GUARDED_BY(mu_);
    tstring run_dir_ TF_GUARDED_BY(mu_);
    uint64 current_checkpoint_id_ TF_GUARDED_BY(mu_);
    std::unique_ptr<InstantiatedCapturedFunction> instantiated_shard_func_
        TF_GUARDED_BY(mu_);
  };

  const DatasetBase* input_;
  const tstring path_;
  const std::string compression_;
  const std::unique_ptr<CapturedFunction> shard_func_;
  const bool use_shard_func_;
  // NOTE(review): the members below are default-constructed and never set by
  // the constructor visible here.
  const DataTypeVector output_types_;
  const std::vector<PartialTensorShape> output_shapes_;
  const std::shared_ptr<FunctionMetadata> func_metadata_;
  const std::string writer_prefix_;
};
// Reads the v2 op's attributes: compression codec, declared output signature,
// and the shard-function configuration.
SaveDatasetV2Op::SaveDatasetV2Op(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
                                               &func_metadata_));
}
// Builds the Dataset wrapper from the op inputs (input dataset, path) and the
// captured shard function.
// NOTE(review): the `input` parameter is unused — the dataset is re-fetched
// from ctx->input(0); presumably equivalent, confirm against
// UnaryDatasetOpKernel's contract.
void SaveDatasetV2Op::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                  DatasetBase** output) {
  DatasetBase* dataset;
  OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
  tstring path;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPath, &path));
  std::unique_ptr<CapturedFunction> shard_func;
  OP_REQUIRES_OK(
      ctx, CapturedFunction::Create(ctx, func_metadata_, kShardFuncOtherArgs,
                                    &shard_func));
  *output = new Dataset(ctx, dataset, path, compression_, std::move(shard_func),
                        use_shard_func_);
}
// Register both save ops for CPU execution.
namespace {
REGISTER_KERNEL_BUILDER(Name("SaveDataset").Device(DEVICE_CPU), SaveDatasetOp);
REGISTER_KERNEL_BUILDER(Name("SaveDatasetV2").Device(DEVICE_CPU),
                        SaveDatasetV2Op);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char kSaveDatasetV2NodeName[] = "save_dataset_v2";
// DatasetParams describing a SaveDatasetV2 node for the op test harness:
// carries the save path, compression, shard function (and its library), and
// the declared output signature.
class SaveDatasetV2Params : public DatasetParams {
 public:
  template <typename T>
  SaveDatasetV2Params(T input_dataset_params, const tstring& path,
                      const std::string& compression,
                      FunctionDefHelper::AttrValueWrapper shard_func,
                      std::vector<FunctionDef> func_lib, bool use_shard_func,
                      DataTypeVector output_dtypes,
                      std::vector<PartialTensorShape> output_shapes,
                      string node_name, DataTypeVector type_arguments)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        path_(path),
        compression_(compression),
        shard_func_(shard_func),
        func_lib_(std::move(func_lib)),
        use_shard_func_(use_shard_func),
        type_arguments_(std::move(type_arguments)) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The only non-dataset input tensor is the scalar save path.
  std::vector<Tensor> GetInputTensors() const override {
    std::vector<Tensor> input_tensors;
    input_tensors.emplace_back(CreateTensor<tstring>(TensorShape({}), {path_}));
    return input_tensors;
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->emplace_back(SaveDatasetV2Op::kInputDataset);
    input_names->emplace_back(SaveDatasetV2Op::kPath);
    return absl::OkStatus();
  }
  // Mirrors the attr list declared by the SaveDatasetV2 op.
  Status GetAttributes(AttributeVector* attr_vector) const override {
    attr_vector->clear();
    attr_vector->emplace_back(SaveDatasetV2Op::kCompression, compression_);
    attr_vector->emplace_back(SaveDatasetV2Op::kShardFunc, shard_func_);
    attr_vector->emplace_back(SaveDatasetV2Op::kUseShardFunc, use_shard_func_);
    attr_vector->emplace_back(SaveDatasetV2Op::kShardFuncTarguments,
                              type_arguments_);
    attr_vector->emplace_back(SaveDatasetV2Op::kOutputTypes, output_dtypes_);
    attr_vector->emplace_back(SaveDatasetV2Op::kOutputShapes, output_shapes_);
    return absl::OkStatus();
  }
  string path() const { return path_; }
  string dataset_type() const override { return SaveDatasetV2Op::kDatasetType; }
  string op_name() const override { return "SaveDatasetV2"; }
  std::vector<FunctionDef> func_lib() const override { return func_lib_; }

 private:
  std::string path_;
  std::string compression_;
  FunctionDefHelper::AttrValueWrapper shard_func_;
  std::vector<FunctionDef> func_lib_;
  bool use_shard_func_;
  DataTypeVector type_arguments_;
};
// Test fixture: records the save path from the params so tests could inspect
// the on-disk output.
class SaveDatasetV2OpTest : public DatasetOpsTestBase {
 public:
  Status Initialize(const DatasetParams& dataset_params) {
    TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
    auto params = static_cast<const SaveDatasetV2Params&>(dataset_params);
    save_filename_ = params.path();
    return absl::OkStatus();
  }

 protected:
  std::string save_filename_;
};
// range(0, 10, 2), no compression, shard function present but unused.
SaveDatasetV2Params SaveDatasetV2Params1() {
  return SaveDatasetV2Params(
      RangeDatasetParams(0, 10, 2),
      io::JoinPath(testing::TmpDir(), "save_data"),
      "",
      FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
      {test::function::XTimesTwo()},
      false,
      {DT_INT64},
      {PartialTensorShape({})},
      kSaveDatasetV2NodeName,
      {});
}
// range(0, 5, 1), GZIP compression, shard function enabled.
SaveDatasetV2Params SaveDatasetV2Params2() {
  return SaveDatasetV2Params(
      RangeDatasetParams(0, 5, 1),
      io::JoinPath(testing::TmpDir(), "save_data"),
      "GZIP",
      FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
      {test::function::XTimesTwo()},
      true,
      {DT_INT64},
      {PartialTensorShape({})},
      kSaveDatasetV2NodeName,
      {});
}
// Expected elements per parameter set: SaveDatasetV2 forwards its input
// unchanged, so each case expects exactly the underlying range contents.
std::vector<GetNextTestCase<SaveDatasetV2Params>> GetNextTestCases() {
  return {{
      SaveDatasetV2Params1(),
      CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
      {SaveDatasetV2Params2(),
       CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}

// Value-parameterized fixture over the GetNext test cases above.
class ParameterizedGetNextTest : public SaveDatasetV2OpTest,
                                 public ::testing::WithParamInterface<
                                     GetNextTestCase<SaveDatasetV2Params>> {};

// Drains the iterator and compares against the expected elements
// (order-sensitive comparison).
TEST_P(ParameterizedGetNextTest, GetNext) {
  auto test_case = GetParam();
  TF_ASSERT_OK(Initialize(test_case.dataset_params));
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  // Keep pulling until the iterator reports end-of-sequence.
  while (!end_of_sequence) {
    std::vector<Tensor> next;
    TF_EXPECT_OK(
        iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
    out_tensors.insert(out_tensors.end(), next.begin(), next.end());
  }
  TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           /*compare_order=*/true));
}

INSTANTIATE_TEST_SUITE_P(SaveDatasetV2OpTest, ParameterizedGetNextTest,
                         ::testing::ValuesIn(GetNextTestCases()));
// Verifies that the dataset node is created under the requested node name.
TEST_F(SaveDatasetV2OpTest, DatasetNodeName) {
  auto dataset_params = SaveDatasetV2Params1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
// Verifies the dataset's type string.  The previous version built a
// name_utils::OpNameParams local that was never passed to the check; that
// dead code has been removed.
TEST_F(SaveDatasetV2OpTest, DatasetTypeString) {
  auto dataset_params = SaveDatasetV2Params1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString("SaveDatasetV2"));
}
// Verifies that the dataset reports the expected output dtypes.
TEST_F(SaveDatasetV2OpTest, DatasetOutputDtypes) {
  auto dataset_params = SaveDatasetV2Params1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
// Both parameter sets produce scalar int64 elements.
std::vector<DatasetOutputDtypesTestCase<SaveDatasetV2Params>>
DatasetOutputDtypesTestCases() {
  return {{SaveDatasetV2Params1(),
           {DT_INT64}},
          {SaveDatasetV2Params2(),
           {DT_INT64}}};
}

DATASET_OUTPUT_DTYPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
                             DatasetOutputDtypesTestCases())

// Both parameter sets produce scalar-shaped elements.
std::vector<DatasetOutputShapesTestCase<SaveDatasetV2Params>>
DatasetOutputShapesTestCases() {
  return {{SaveDatasetV2Params1(),
           {PartialTensorShape({})}},
          {SaveDatasetV2Params2(),
           {PartialTensorShape({})}}};
}

DATASET_OUTPUT_SHAPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
                             DatasetOutputShapesTestCases())

// Cardinality equals the element count of the underlying range: 5 elements
// for both range(0, 10, 2) and range(0, 5, 1).
std::vector<CardinalityTestCase<SaveDatasetV2Params>> CardinalityTestCases() {
  return {{SaveDatasetV2Params1(),
           /*expected_cardinality=*/5},
          {SaveDatasetV2Params2(),
           /*expected_cardinality=*/5}};
}

DATASET_CARDINALITY_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
                           CardinalityTestCases())
// Verifies the iterator prefix used for checkpoint keys.
TEST_F(SaveDatasetV2OpTest, IteratorPrefix) {
  auto dataset_params = SaveDatasetV2Params1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      SaveDatasetV2Op::kDatasetType, dataset_params.iterator_prefix())));
}

// Save/restore cases: `breakpoints` are element counts at which the iterator
// is checkpointed and restored before continuing.
std::vector<IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>>
IteratorSaveAndRestoreTestCases() {
  return {{SaveDatasetV2Params1(),
           /*breakpoints=*/{0, 2, 4, 6, 8},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
          {SaveDatasetV2Params2(),
           /*breakpoints=*/{0, 2, 5},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}

// Value-parameterized fixture over the save-and-restore cases above.
class ParameterizedIteratorSaveAndRestoreTest
    : public SaveDatasetV2OpTest,
      public ::testing::WithParamInterface<
          IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>> {};
// Checkpoints the iterator at each breakpoint, restores a fresh iterator from
// the serialized state, and verifies the overall element stream is unchanged.
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
  auto test_case = GetParam();
  TF_ASSERT_OK(Initialize(test_case.dataset_params));
  std::unique_ptr<SerializationContext> serialization_ctx;
  TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  int cur_iteration = 0;
  const std::vector<int>& breakpoints = test_case.breakpoints;
  for (int breakpoint : breakpoints) {
    // Serialize the current iterator state ...
    VariantTensorDataWriter writer;
    TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
    std::vector<const VariantTensorData*> data;
    writer.GetData(&data);
    // ... and immediately restore the iterator from it.
    VariantTensorDataReader reader(data);
    TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
                                 test_case.dataset_params.iterator_prefix(),
                                 *dataset_, &iterator_));
    // Advance the restored iterator up to the next breakpoint.
    while (cur_iteration <= breakpoint) {
      std::vector<Tensor> next;
      TF_EXPECT_OK(
          iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
      out_tensors.insert(out_tensors.end(), next.begin(), next.end());
      cur_iteration++;
    }
  }
  TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           /*compare_order=*/true));
}

INSTANTIATE_TEST_CASE_P(SaveDatasetV2OpTest,
                        ParameterizedIteratorSaveAndRestoreTest,
                        ::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | void SaveDatasetV2Op::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                  DatasetBase** output) {
  // Builds the SaveDatasetV2 dataset: wraps the input dataset together with
  // the destination path, compression setting, and the shard function.
  // NOTE(review): the `input` parameter is unused here; the input dataset is
  // re-read from ctx->input(0) below — confirm this is intentional.
  DatasetBase* dataset;
  OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
  // Destination path is a scalar string argument.
  tstring path;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPath, &path));
  // Instantiate the shard function with its captured inputs.
  std::unique_ptr<CapturedFunction> shard_func;
  OP_REQUIRES_OK(
      ctx, CapturedFunction::Create(ctx, func_metadata_, kShardFuncOtherArgs,
                                    &shard_func));
  *output = new Dataset(ctx, dataset, path, compression_, std::move(shard_func),
                        use_shard_func_);
}
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
TEST_F(SaveDatasetV2OpTest, DatasetNodeName) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SaveDatasetV2OpTest, DatasetTypeString) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString("SaveDatasetV2"));
}
TEST_F(SaveDatasetV2OpTest, DatasetOutputDtypes) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
DATASET_CARDINALITY_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
CardinalityTestCases())
TEST_F(SaveDatasetV2OpTest, IteratorPrefix) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
SaveDatasetV2Op::kDatasetType, dataset_params.iterator_prefix())));
}
|
#include "eval/eval/attribute_trail.h"
#include <algorithm>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "base/attribute.h"
namespace google::api::expr::runtime {
// Returns a copy of this trail extended by one qualifier.  Stepping an empty
// trail yields another empty trail.
AttributeTrail AttributeTrail::Step(cel::AttributeQualifier qualifier) const {
  if (empty()) return AttributeTrail();
  const auto& path = attribute_->qualifier_path();
  // Copy the existing qualifier path, then append the new step.
  std::vector<cel::AttributeQualifier> extended_path;
  extended_path.reserve(path.size() + 1);
  for (const auto& existing : path) {
    extended_path.push_back(existing);
  }
  extended_path.push_back(std::move(qualifier));
  return AttributeTrail(cel::Attribute(std::string(attribute_->variable_name()),
                                       std::move(extended_path)));
}
} | #include "eval/eval/attribute_trail.h"
#include <string>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_value.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
// Stepping an empty trail must stay empty, for both string and
// qualifier-valued steps.
TEST(AttributeTrailTest, AttributeTrailEmptyStep) {
  std::string step = "step";
  CelValue step_value = CelValue::CreateString(&step);
  AttributeTrail trail;
  ASSERT_TRUE(trail.Step(&step).empty());
  ASSERT_TRUE(trail.Step(CreateCelAttributeQualifier(step_value)).empty());
}

// Stepping a non-empty trail appends the qualifier to the attribute path.
TEST(AttributeTrailTest, AttributeTrailStep) {
  std::string step = "step";
  CelValue step_value = CelValue::CreateString(&step);
  AttributeTrail trail = AttributeTrail("ident").Step(&step);
  ASSERT_EQ(trail.attribute(),
            CelAttribute("ident", {CreateCelAttributeQualifier(step_value)}));
}
} | #include "eval/eval/attribute_trail.h"
#include <algorithm>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "base/attribute.h"
namespace google::api::expr::runtime {
AttributeTrail AttributeTrail::Step(cel::AttributeQualifier qualifier) const {
if (empty()) return AttributeTrail();
std::vector<cel::AttributeQualifier> qualifiers;
qualifiers.reserve(attribute_->qualifier_path().size() + 1);
std::copy_n(attribute_->qualifier_path().begin(),
attribute_->qualifier_path().size(),
std::back_inserter(qualifiers));
qualifiers.push_back(std::move(qualifier));
return AttributeTrail(cel::Attribute(std::string(attribute_->variable_name()),
std::move(qualifiers)));
} | #include "eval/eval/attribute_trail.h"
#include <string>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_value.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
TEST(AttributeTrailTest, AttributeTrailEmptyStep) {
std::string step = "step";
CelValue step_value = CelValue::CreateString(&step);
AttributeTrail trail;
ASSERT_TRUE(trail.Step(&step).empty());
ASSERT_TRUE(trail.Step(CreateCelAttributeQualifier(step_value)).empty());
}
TEST(AttributeTrailTest, AttributeTrailStep) {
std::string step = "step";
CelValue step_value = CelValue::CreateString(&step);
AttributeTrail trail = AttributeTrail("ident").Step(&step);
ASSERT_EQ(trail.attribute(),
CelAttribute("ident", {CreateCelAttributeQualifier(step_value)}));
} |
#include "tensorflow/core/util/debug_data_dumper.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
// Returns the process-wide singleton dumper (intentionally never deleted).
DebugDataDumper* DebugDataDumper::Global() {
  static DebugDataDumper* const kInstance = new DebugDataDumper();
  return kInstance;
}
// Reads the TF_DUMP_GRAPH_* environment variables at construction time.
DebugDataDumper::DebugDataDumper() { LoadEnvvars(); }
void DebugDataDumper::LoadEnvvars() {
const char* dump_wrapped = getenv("TF_DUMP_GRAPH_WRAPPED");
dump_wrapped_ = static_cast<bool>(dump_wrapped);
const char* name_filter = getenv("TF_DUMP_GRAPH_NAME_FILTER");
name_filter_ =
name_filter ? std::optional<std::string>{name_filter} : std::nullopt;
const char* groups_filter = getenv("TF_DUMP_GRAPH_GROUPS");
groups_filter_ =
groups_filter ? std::set<std::string>(absl::StrSplit(groups_filter, ','))
: std::set<std::string>({kDebugGroupMain});
}
// Decides whether a graph with the given name should be dumped for the given
// debug group, based on the cached TF_DUMP_GRAPH_* environment settings.
bool DebugDataDumper::ShouldDump(const std::string& name,
                                 const std::string& group) const {
  // "__wrapped__" graphs are skipped unless TF_DUMP_GRAPH_WRAPPED is set.
  if (!dump_wrapped_ && absl::StartsWith(name, "__wrapped__")) return false;
  // No name filter configured: dump nothing.
  if (name_filter_ == std::nullopt) {
    VLOG(1) << "Skip dumping graph '" << name
            << "', because TF_DUMP_GRAPH_NAME_FILTER is not set";
    return false;
  }
  // A filter of "*" matches everything; otherwise the filter string must be a
  // substring of the graph name.
  if (!absl::EqualsIgnoreCase(*name_filter_, "*") &&
      !absl::StrContains(name, *name_filter_)) {
    VLOG(1) << "Skip dumping graph '" << name
            << "', because TF_DUMP_GRAPH_NAME_FILTER is not '*' and "
            << "it is not contained by the graph name";
    return false;
  }
  // The group must be listed explicitly, or "*" must be in the group filter.
  if (groups_filter_.find(group) == groups_filter_.end() &&
      groups_filter_.find("*") == groups_filter_.end())
    return false;
  return true;
}
// Dumps one CSV row per graph node that carries a creation stack trace:
// node_id,node_name,stackframes (frames joined by ';').
void DebugDataDumper::DumpOpCreationStackTraces(const std::string& name,
                                                const std::string& group,
                                                const std::string& tag,
                                                const Graph* graph) {
  if (!ShouldDump(name, group)) return;
  std::string dump_filename = GetDumpFilename(name, group, tag);
  DumpToFile(dump_filename, "", ".csv", "StackTrace",
             [graph, &dump_filename](WritableFile* file) {
               // CSV header.
               auto status = file->Append("node_id,node_name,stackframes\n");
               if (!status.ok()) {
                 LOG(WARNING) << "error writing to file to " << dump_filename
                              << ": " << status.message();
                 return status;
               }
               for (Node* node : graph->nodes()) {
                 auto stack_trace = node->GetStackTrace();
                 // Nodes without a recorded stack trace are skipped.
                 if (stack_trace == nullptr) continue;
                 int node_id = node->id();
                 const std::string& node_name = node->name();
                 // Render each frame as "file(line): function".
                 std::vector<std::string> stackframes;
                 stackframes.reserve(stack_trace->ToFrames().size());
                 for (auto& frame : stack_trace->ToFrames()) {
                   stackframes.push_back(
                       absl::StrFormat("%s(%d): %s", frame.file_name,
                                       frame.line_number, frame.function_name));
                 }
                 status = file->Append(
                     absl::StrFormat("%d,%s,%s\n", node_id, node_name,
                                     absl::StrJoin(stackframes, ";")));
                 if (!status.ok()) {
                   LOG(WARNING) << "error writing to file to " << dump_filename
                                << ": " << status.message();
                   return status;
                 }
               }
               return file->Close();
             });
}
// Dumps `graph` (plus the reachable subset of `func_lib_def`, if given) as a
// GraphDef text proto.  `bypass_filter` forces the dump even when the
// environment filters would reject it.
void DebugDataDumper::DumpGraph(const std::string& name,
                                const std::string& group,
                                const std::string& tag, const Graph* graph,
                                const FunctionLibraryDefinition* func_lib_def,
                                bool bypass_filter) {
  if (!ShouldDump(name, group) && !bypass_filter) return;
  std::string dump_filename = GetDumpFilename(name, group, tag);
  // Guard against file names exceeding the common filesystem limit.  The
  // original warning contained a dangling " to " fragment ("... to ,
  // because ..."); the message is fixed here.
  if (dump_filename.size() > 255) {
    LOG(WARNING) << "Failed to dump graph " << dump_filename
                 << ", because the file name is longer than 255";
    return;
  }
  GraphDef graph_def;
  graph->ToGraphDef(&graph_def);
  // Attach only the functions actually reachable from this graph to keep the
  // dump small.
  if (func_lib_def) {
    FunctionLibraryDefinition reachable_lib_def =
        func_lib_def->ReachableDefinitions(graph_def);
    *graph_def.mutable_library() = reachable_lib_def.ToProto();
  }
  DumpGraphDefToFile(dump_filename, graph_def);
}
// Builds "<name>.<4-digit id>.<group>.<tag>"; empty names map to
// "unknown_graph".  The per-name dump id keeps repeated dumps ordered.
std::string DebugDataDumper::GetDumpFilename(const std::string& name,
                                             const std::string& group,
                                             const std::string& tag) {
  const std::string base = name.empty() ? "unknown_graph" : name;
  // Note: the dump-id counter is keyed on the raw (possibly empty) name.
  const int dump_id = GetNextDumpId(name);
  return absl::StrFormat("%s.%04d.%s.%s", base, dump_id, group, tag);
}
} | #include "tensorflow/core/util/debug_data_dumper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(DebugDataDumper, NoPrefixTest) {
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, NoNameFilterTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, ShouldDumpTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DoNotDumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
setenv("TF_DUMP_GRAPH_GROUPS", "main,bridge_phase1_clustering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
setenv("TF_DUMP_GRAPH_WRAPPED", "true", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
}
// The dump id increments per graph name: a second dump of the same name gets
// id 0001, while a different name starts again at 0000.
TEST(DebugDataDumper, DumpFileBasenameTest) {
  EXPECT_EQ("DumpFileBasenameTest1.0000.main.tag1",
            DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
                                                 kDebugGroupMain, "tag1"));
  EXPECT_EQ("DumpFileBasenameTest1.0001.main.tag2",
            DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
                                                 kDebugGroupMain, "tag2"));
  EXPECT_EQ("DumpFileBasenameTest2.0000.main.tag1",
            DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest2",
                                                 kDebugGroupMain, "tag1"));
}
TEST(DebugDataDumper, DumpGraphToFileTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpGraph("DumpGraphToFileTest", kDebugGroupMain, "tag",
&graph, nullptr, false);
std::string dumpFilename =
io::JoinPath(dir, "DumpGraphToFileTest.0000.main.tag.pbtxt");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, DumpGraphLongFileNameCrashTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::string name = std::string(256, 'x');
DEBUG_DATA_DUMPER()->DumpGraph(name, kDebugGroupMain, "tag", &graph, nullptr,
false);
std::string dumpFilename = io::JoinPath(
dir, absl::StrFormat("%s.0000.main.tag.pbtxt", name.c_str()));
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
TEST(DebugDataDumper, DumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_OP_CREATION_STACKTRACES", "1", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.csv");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, NoDumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.json");
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
}
} | void DebugDataDumper::DumpGraph(const std::string& name,
const std::string& group,
const std::string& tag, const Graph* graph,
const FunctionLibraryDefinition* func_lib_def,
bool bypass_filter) {
if (!ShouldDump(name, group) && !bypass_filter) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
if (dump_filename.size() > 255) {
LOG(WARNING) << "Failed to dump graph " << dump_filename << " to "
<< ", because the file name is longer than 255";
return;
}
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
if (func_lib_def) {
FunctionLibraryDefinition reachable_lib_def =
func_lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
}
DumpGraphDefToFile(dump_filename, graph_def);
} | TEST(DebugDataDumper, DumpGraphToFileTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpGraph("DumpGraphToFileTest", kDebugGroupMain, "tag",
&graph, nullptr, false);
std::string dumpFilename =
io::JoinPath(dir, "DumpGraphToFileTest.0000.main.tag.pbtxt");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, DumpGraphLongFileNameCrashTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::string name = std::string(256, 'x');
DEBUG_DATA_DUMPER()->DumpGraph(name, kDebugGroupMain, "tag", &graph, nullptr,
false);
std::string dumpFilename = io::JoinPath(
dir, absl::StrFormat("%s.0000.main.tag.pbtxt", name.c_str()));
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
} |
#include "quiche/common/quiche_mem_slice_storage.h"
#include <algorithm>
#include <utility>
#include "quiche/quic/core/quic_utils.h"
namespace quiche {
// Copies the contents of `iov` into a sequence of owned slices, each at most
// `max_slice_len` bytes long.  A null `iov` produces empty storage.
QuicheMemSliceStorage::QuicheMemSliceStorage(
    const struct iovec* iov, int iov_count, QuicheBufferAllocator* allocator,
    const quic::QuicByteCount max_slice_len) {
  if (iov == nullptr) {
    return;
  }
  // Total number of bytes spread across the iovec entries.
  quic::QuicByteCount remaining = 0;
  for (int i = 0; i < iov_count; ++i) {
    remaining += iov[i].iov_len;
  }
  QUICHE_DCHECK_LT(0u, remaining);
  size_t offset = 0;
  // Peel off up-to-max_slice_len-sized chunks until all bytes are copied.
  while (remaining > 0) {
    const size_t chunk = std::min(remaining, max_slice_len);
    storage_.push_back(QuicheMemSlice(QuicheBuffer::CopyFromIovec(
        allocator, iov, iov_count, offset, chunk)));
    remaining -= chunk;
    offset += chunk;
  }
}
} | #include "quiche/common/quiche_mem_slice_storage.h"
#include <string>
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quiche {
namespace test {
namespace {
// Fixture for QuicheMemSliceStorage tests; no shared state needed.
class QuicheMemSliceStorageImplTest : public QuicheTest {
 public:
  QuicheMemSliceStorageImplTest() = default;
};

// A null iovec yields empty storage.
TEST_F(QuicheMemSliceStorageImplTest, EmptyIov) {
  QuicheMemSliceStorage storage(nullptr, 0, nullptr, 1024);
  EXPECT_TRUE(storage.ToSpan().empty());
}

// A single iovec entry is copied (not aliased) into one slice.
TEST_F(QuicheMemSliceStorageImplTest, SingleIov) {
  SimpleBufferAllocator allocator;
  std::string body(3, 'c');
  struct iovec iov = {const_cast<char*>(body.data()), body.length()};
  QuicheMemSliceStorage storage(&iov, 1, &allocator, 1024);
  auto span = storage.ToSpan();
  EXPECT_EQ("ccc", span[0].AsStringView());
  // The storage owns its own copy of the bytes.
  EXPECT_NE(static_cast<const void*>(span[0].data()), body.data());
}

// Multiple iovec entries that fit within max_slice_len are coalesced into a
// single slice.
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInSingleSlice) {
  SimpleBufferAllocator allocator;
  std::string body1(3, 'a');
  std::string body2(4, 'b');
  struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
                        {const_cast<char*>(body2.data()), body2.length()}};
  QuicheMemSliceStorage storage(iov, 2, &allocator, 1024);
  auto span = storage.ToSpan();
  EXPECT_EQ("aaabbbb", span[0].AsStringView());
}

// When the total exceeds max_slice_len, the data is split across slices.
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInMultipleSlice) {
  SimpleBufferAllocator allocator;
  std::string body1(4, 'a');
  std::string body2(4, 'b');
  struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
                        {const_cast<char*>(body2.data()), body2.length()}};
  QuicheMemSliceStorage storage(iov, 2, &allocator, 4);
  auto span = storage.ToSpan();
  EXPECT_EQ("aaaa", span[0].AsStringView());
  EXPECT_EQ("bbbb", span[1].AsStringView());
}
}
}
} | #include "quiche/common/quiche_mem_slice_storage.h"
#include <algorithm>
#include <utility>
#include "quiche/quic/core/quic_utils.h"
namespace quiche {
QuicheMemSliceStorage::QuicheMemSliceStorage(
const struct iovec* iov, int iov_count, QuicheBufferAllocator* allocator,
const quic::QuicByteCount max_slice_len) {
if (iov == nullptr) {
return;
}
quic::QuicByteCount write_len = 0;
for (int i = 0; i < iov_count; ++i) {
write_len += iov[i].iov_len;
}
QUICHE_DCHECK_LT(0u, write_len);
size_t io_offset = 0;
while (write_len > 0) {
size_t slice_len = std::min(write_len, max_slice_len);
QuicheBuffer buffer = QuicheBuffer::CopyFromIovec(allocator, iov, iov_count,
io_offset, slice_len);
storage_.push_back(QuicheMemSlice(std::move(buffer)));
write_len -= slice_len;
io_offset += slice_len;
}
} | TEST_F(QuicheMemSliceStorageImplTest, EmptyIov) {
QuicheMemSliceStorage storage(nullptr, 0, nullptr, 1024);
EXPECT_TRUE(storage.ToSpan().empty());
}
TEST_F(QuicheMemSliceStorageImplTest, SingleIov) {
SimpleBufferAllocator allocator;
std::string body(3, 'c');
struct iovec iov = {const_cast<char*>(body.data()), body.length()};
QuicheMemSliceStorage storage(&iov, 1, &allocator, 1024);
auto span = storage.ToSpan();
EXPECT_EQ("ccc", span[0].AsStringView());
EXPECT_NE(static_cast<const void*>(span[0].data()), body.data());
}
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInSingleSlice) {
SimpleBufferAllocator allocator;
std::string body1(3, 'a');
std::string body2(4, 'b');
struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
{const_cast<char*>(body2.data()), body2.length()}};
QuicheMemSliceStorage storage(iov, 2, &allocator, 1024);
auto span = storage.ToSpan();
EXPECT_EQ("aaabbbb", span[0].AsStringView());
}
TEST_F(QuicheMemSliceStorageImplTest, MultipleIovInMultipleSlice) {
SimpleBufferAllocator allocator;
std::string body1(4, 'a');
std::string body2(4, 'b');
struct iovec iov[] = {{const_cast<char*>(body1.data()), body1.length()},
{const_cast<char*>(body2.data()), body2.length()}};
QuicheMemSliceStorage storage(iov, 2, &allocator, 4);
auto span = storage.ToSpan();
EXPECT_EQ("aaaa", span[0].AsStringView());
EXPECT_EQ("bbbb", span[1].AsStringView());
} |
#include "quiche/quic/qbone/bonnet/tun_device.h"
#include <fcntl.h>
#include <linux/if_tun.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <ios>
#include <string>
#include "absl/cleanup/cleanup.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/qbone/platform/kernel_interface.h"
ABSL_FLAG(std::string, qbone_client_tun_device_path, "/dev/net/tun",
"The path to the QBONE client's TUN device.");
namespace quic {
const int kInvalidFd = -1;
// `kernel` must be non-null and outlive this object; it is held by reference
// so tests can substitute a mock kernel interface.
TunTapDevice::TunTapDevice(const std::string& interface_name, int mtu,
                           bool persist, bool setup_tun, bool is_tap,
                           KernelInterface* kernel)
    : interface_name_(interface_name),
      mtu_(mtu),
      persist_(persist),
      setup_tun_(setup_tun),
      is_tap_(is_tap),
      file_descriptor_(kInvalidFd),  // device not yet opened
      kernel_(*kernel) {}
// Non-persistent devices are taken down before the fd is closed, so the
// interface disappears together with this object.
TunTapDevice::~TunTapDevice() {
  if (!persist_) {
    Down();
  }
  CloseDevice();
}
// Validates the interface name, then opens and configures the device.
// Returns false (after logging) on any failure.
bool TunTapDevice::Init() {
  if (interface_name_.empty() || interface_name_.size() >= IFNAMSIZ) {
    QUIC_BUG(quic_bug_10995_1)
        << "interface_name must be nonempty and shorter than " << IFNAMSIZ;
    return false;
  }
  // Short-circuits: the interface is only configured if it opened cleanly.
  return OpenDevice() && ConfigureInterface();
}
// Brings the interface up via SIOCSIFFLAGS.  A no-op (success) when this
// object is not responsible for interface setup.
bool TunTapDevice::Up() {
  if (!setup_tun_) {
    return true;
  }
  struct ifreq if_request;
  memset(&if_request, 0, sizeof(if_request));
  // ifreq is zeroed above and Init() guarantees the name fits, so the
  // non-terminating string::copy is safe here.
  interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
  if_request.ifr_flags = IFF_UP;
  return NetdeviceIoctl(SIOCSIFFLAGS, reinterpret_cast<void*>(&if_request));
}
// Brings the interface down by clearing all interface flags.  A no-op
// (success) when this object is not responsible for interface setup.
bool TunTapDevice::Down() {
  if (!setup_tun_) {
    return true;
  }
  struct ifreq if_request;
  memset(&if_request, 0, sizeof(if_request));
  interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
  if_request.ifr_flags = 0;
  return NetdeviceIoctl(SIOCSIFFLAGS, reinterpret_cast<void*>(&if_request));
}
// Returns the TUN/TAP fd, or kInvalidFd (-1) if the device is not open.
int TunTapDevice::GetFileDescriptor() const { return file_descriptor_; }
// Opens the TUN/TAP character device and attaches it to the named interface.
// On any failure path the partially-opened fd is closed by the scoped cleanup
// and false is returned.
bool TunTapDevice::OpenDevice() {
  if (file_descriptor_ != kInvalidFd) {
    CloseDevice();
  }
  struct ifreq if_request;
  memset(&if_request, 0, sizeof(if_request));
  interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
  // Multi-queue lets several fds attach to one interface; no packet-info
  // header is prepended to reads/writes.
  if_request.ifr_flags = IFF_MULTI_QUEUE | IFF_NO_PI;
  if (is_tap_) {
    if_request.ifr_flags |= IFF_TAP;
  } else {
    if_request.ifr_flags |= IFF_TUN;
  }
  // Closes the device on every early return below.
  bool successfully_opened = false;
  auto cleanup = absl::MakeCleanup([this, &successfully_opened]() {
    if (!successfully_opened) {
      CloseDevice();
    }
  });
  const std::string tun_device_path =
      absl::GetFlag(FLAGS_qbone_client_tun_device_path);
  int fd = kernel_.open(tun_device_path.c_str(), O_RDWR);
  if (fd < 0) {
    QUIC_PLOG(WARNING) << "Failed to open " << tun_device_path;
    return successfully_opened;
  }
  file_descriptor_ = fd;
  if (!CheckFeatures(fd)) {
    return successfully_opened;
  }
  if (kernel_.ioctl(fd, TUNSETIFF, reinterpret_cast<void*>(&if_request)) != 0) {
    QUIC_PLOG(WARNING) << "Failed to TUNSETIFF on fd(" << fd << ")";
    return successfully_opened;
  }
  // TUNSETPERSIST: a non-null argp sets persistence, nullptr clears it.
  if (kernel_.ioctl(
          fd, TUNSETPERSIST,
          persist_ ? reinterpret_cast<void*>(&if_request) : nullptr) != 0) {
    QUIC_PLOG(WARNING) << "Failed to TUNSETPERSIST on fd(" << fd << ")";
    return successfully_opened;
  }
  successfully_opened = true;
  return successfully_opened;
}
// Sets the interface MTU.  A no-op (success) when this object is not
// responsible for interface setup; closes the device on failure.
bool TunTapDevice::ConfigureInterface() {
  if (!setup_tun_) {
    return true;
  }
  struct ifreq if_request;
  memset(&if_request, 0, sizeof(if_request));
  interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
  if_request.ifr_mtu = mtu_;
  if (!NetdeviceIoctl(SIOCSIFMTU, reinterpret_cast<void*>(&if_request))) {
    CloseDevice();
    return false;
  }
  return true;
}
// Verifies via TUNGETFEATURES that the kernel's TUN driver supports the
// features this class relies on.
// NOTE(review): IFF_TUN is required even when is_tap_ is true — confirm this
// is intentional for TAP devices.
bool TunTapDevice::CheckFeatures(int tun_device_fd) {
  unsigned int actual_features;
  if (kernel_.ioctl(tun_device_fd, TUNGETFEATURES, &actual_features) != 0) {
    QUIC_PLOG(WARNING) << "Failed to TUNGETFEATURES";
    return false;
  }
  unsigned int required_features = IFF_TUN | IFF_NO_PI;
  // All required feature bits must be present in the kernel's mask.
  if ((required_features & actual_features) != required_features) {
    QUIC_LOG(WARNING)
        << "Required feature does not exist. required_features: 0x" << std::hex
        << required_features << " vs actual_features: 0x" << std::hex
        << actual_features;
    return false;
  }
  return true;
}
// Issues a network-device ioctl through a throwaway AF_INET6 datagram socket,
// logging and returning false on failure.  The socket is always closed.
bool TunTapDevice::NetdeviceIoctl(int request, void* argp) {
  const int fd = kernel_.socket(AF_INET6, SOCK_DGRAM, 0);
  if (fd < 0) {
    QUIC_PLOG(WARNING) << "Failed to create AF_INET6 socket.";
    return false;
  }
  const bool ok = kernel_.ioctl(fd, request, argp) == 0;
  if (!ok) {
    QUIC_PLOG(WARNING) << "Failed ioctl request: " << request;
  }
  kernel_.close(fd);
  return ok;
}
// Closes the TUN/TAP fd if open and marks the device as closed.
void TunTapDevice::CloseDevice() {
  if (file_descriptor_ != kInvalidFd) {
    kernel_.close(file_descriptor_);
    file_descriptor_ = kInvalidFd;
  }
}
} | #include "quiche/quic/qbone/bonnet/tun_device.h"
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/platform/mock_kernel.h"
namespace quic::test {
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::Unused;
const char kDeviceName[] = "tun0";
const int kSupportedFeatures =
IFF_TUN | IFF_TAP | IFF_MULTI_QUEUE | IFF_ONE_QUEUE | IFF_NO_PI;
// Fixture that backs TunTapDevice with a MockKernel so no real TUN device or
// syscalls are needed. File descriptors are handed out sequentially from
// next_fd_, and each handed-out fd gets a matching close() expectation.
class TunDeviceTest : public QuicTest {
 protected:
  void SetUp() override {
    // Any AF_INET6 socket() returns a fresh fd that must later be closed.
    EXPECT_CALL(mock_kernel_, socket(AF_INET6, _, _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([this](Unused, Unused, Unused) {
          EXPECT_CALL(mock_kernel_, close(next_fd_)).WillOnce(Return(0));
          return next_fd_++;
        }));
  }
  // Installs the default happy-path expectations for TunTapDevice::Init():
  // open /dev/net/tun, report kSupportedFeatures, accept TUNSETIFF /
  // TUNSETPERSIST, and verify the requested MTU. Individual tests override
  // single calls with .WillOnce(Return(-1)) to inject failures.
  void SetInitExpectations(int mtu, bool persist) {
    EXPECT_CALL(mock_kernel_, open(StrEq("/dev/net/tun"), _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([this](Unused, Unused) {
          EXPECT_CALL(mock_kernel_, close(next_fd_)).WillOnce(Return(0));
          return next_fd_++;
        }));
    // TUNGETFEATURES reports every feature the device could need.
    EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([](Unused, Unused, void* argp) {
          auto* actual_flags = reinterpret_cast<int*>(argp);
          *actual_flags = kSupportedFeatures;
          return 0;
        }));
    // TUNSETIFF must request a multi-queue TUN device without packet info,
    // on the expected interface name.
    EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETIFF, _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([](Unused, Unused, void* argp) {
          auto* ifr = reinterpret_cast<struct ifreq*>(argp);
          EXPECT_EQ(IFF_TUN | IFF_MULTI_QUEUE | IFF_NO_PI, ifr->ifr_flags);
          EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
          return 0;
        }));
    // TUNSETPERSIST carries an ifreq only when persistence is requested;
    // otherwise the argument must be null.
    EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETPERSIST, _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([persist](Unused, Unused, void* argp) {
          auto* ifr = reinterpret_cast<struct ifreq*>(argp);
          if (persist) {
            EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
          } else {
            EXPECT_EQ(nullptr, ifr);
          }
          return 0;
        }));
    // SIOCSIFMTU must set the expected MTU on the expected interface.
    EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFMTU, _))
        .Times(AnyNumber())
        .WillRepeatedly(Invoke([mtu](Unused, Unused, void* argp) {
          auto* ifr = reinterpret_cast<struct ifreq*>(argp);
          EXPECT_EQ(mtu, ifr->ifr_mtu);
          EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
          return 0;
        }));
  }
  // Expects exactly one SIOCSIFFLAGS call that raises IFF_UP, returning an
  // error when `fail` is true.
  void ExpectUp(bool fail) {
    EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFFLAGS, _))
        .WillOnce(Invoke([fail](Unused, Unused, void* argp) {
          auto* ifr = reinterpret_cast<struct ifreq*>(argp);
          EXPECT_TRUE(ifr->ifr_flags & IFF_UP);
          EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
          if (fail) {
            return -1;
          } else {
            return 0;
          }
        }));
  }
  // Expects exactly one SIOCSIFFLAGS call that clears IFF_UP. Tests set this
  // without calling Down() explicitly, so the call is presumably satisfied
  // from the TunTapDevice destructor -- confirm against the implementation.
  void ExpectDown(bool fail) {
    EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFFLAGS, _))
        .WillOnce(Invoke([fail](Unused, Unused, void* argp) {
          auto* ifr = reinterpret_cast<struct ifreq*>(argp);
          EXPECT_FALSE(ifr->ifr_flags & IFF_UP);
          EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
          if (fail) {
            return -1;
          } else {
            return 0;
          }
        }));
  }
  MockKernel mock_kernel_;
  // Next file descriptor handed out by the mocked open()/socket().
  int next_fd_ = 100;
};
// Happy path: Init() succeeds, the device comes up, and the trailing
// ExpectDown is satisfied when the device is torn down.
TEST_F(TunDeviceTest, BasicWorkFlow) {
  SetInitExpectations(1500, false);
  TunTapDevice device(kDeviceName, 1500, false, true, false, &mock_kernel_);
  EXPECT_TRUE(device.Init());
  EXPECT_GT(device.GetFileDescriptor(), -1);
  ExpectUp(false);
  EXPECT_TRUE(device.Up());
  ExpectDown(false);
}
// Init() must fail and report fd -1 when /dev/net/tun cannot be opened.
TEST_F(TunDeviceTest, FailToOpenTunDevice) {
  SetInitExpectations(1500, false);
  EXPECT_CALL(mock_kernel_, open(StrEq("/dev/net/tun"), _))
      .WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, false, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
  ExpectDown(false);
}
// Init() must fail when the TUNGETFEATURES query itself errors out.
TEST_F(TunDeviceTest, FailToCheckFeature) {
  SetInitExpectations(1500, false);
  EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _)).WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, false, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
  ExpectDown(false);
}
// Init() must fail when the kernel reports fewer TUN features than required.
TEST_F(TunDeviceTest, TooFewFeature) {
  SetInitExpectations(1500, false);
  EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _))
      .WillOnce(Invoke([](Unused, Unused, void* argp) {
        auto* features = reinterpret_cast<int*>(argp);
        *features = IFF_TUN | IFF_ONE_QUEUE;
        return 0;
      }));
  TunTapDevice device(kDeviceName, 1500, false, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
  ExpectDown(false);
}
// Init() must fail when TUNSETIFF rejects the requested flags.
TEST_F(TunDeviceTest, FailToSetFlag) {
  SetInitExpectations(1500, true);
  EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETIFF, _)).WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, true, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
}
// Init() must fail when the device cannot be made persistent.
TEST_F(TunDeviceTest, FailToPersistDevice) {
  SetInitExpectations(1500, true);
  EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETPERSIST, _)).WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, true, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
}
// Init() must fail when the auxiliary AF_INET6 socket cannot be created.
TEST_F(TunDeviceTest, FailToOpenSocket) {
  SetInitExpectations(1500, true);
  EXPECT_CALL(mock_kernel_, socket(AF_INET6, _, _)).WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, true, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
}
// Init() must fail when SIOCSIFMTU rejects the MTU.
TEST_F(TunDeviceTest, FailToSetMtu) {
  SetInitExpectations(1500, true);
  EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFMTU, _)).WillOnce(Return(-1));
  TunTapDevice device(kDeviceName, 1500, true, true, false, &mock_kernel_);
  EXPECT_FALSE(device.Init());
  EXPECT_EQ(device.GetFileDescriptor(), -1);
}
// Up() must report failure when SIOCSIFFLAGS errors out, even though Init()
// succeeded.
TEST_F(TunDeviceTest, FailToUp) {
  SetInitExpectations(1500, true);
  TunTapDevice device(kDeviceName, 1500, true, true, false, &mock_kernel_);
  EXPECT_TRUE(device.Init());
  EXPECT_GT(device.GetFileDescriptor(), -1);
  ExpectUp(true);
  EXPECT_FALSE(device.Up());
}
}
} | int TunTapDevice::GetFileDescriptor() const { return file_descriptor_; } | TEST_F(TunDeviceTest, BasicWorkFlow) {
SetInitExpectations( 1500, false);
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_TRUE(tun_device.Init());
EXPECT_GT(tun_device.GetFileDescriptor(), -1);
ExpectUp( false);
EXPECT_TRUE(tun_device.Up());
ExpectDown( false);
}
TEST_F(TunDeviceTest, FailToOpenTunDevice) {
SetInitExpectations( 1500, false);
EXPECT_CALL(mock_kernel_, open(StrEq("/dev/net/tun"), _))
.WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
ExpectDown(false);
} |
#ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#include <utility>
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
/// Smart pointer to `TransformRep` with deep-copy semantics: copying the
/// pointer allocates a fresh representation and copies the contents, rather
/// than sharing a reference count.
class DeepCopyTransformRepPtr {
 public:
  /// Constructs a null pointer.
  DeepCopyTransformRepPtr(std::nullptr_t = nullptr) : ptr_(nullptr) {}

  /// Takes ownership of `ptr`. Per the assertion below, `ptr` must be null,
  /// hold the only reference, or have zero input/output rank capacity.
  explicit DeepCopyTransformRepPtr(TransformRep* ptr,
                                   internal::adopt_object_ref_t)
      : ptr_(ptr) {
    assert(ptr == nullptr ||
           (ptr->input_rank_capacity == 0 && ptr->output_rank_capacity == 0) ||
           ptr->reference_count == 1);
  }

  /// Deep-copies `ptr` (which may be null); the source keeps its reference.
  explicit DeepCopyTransformRepPtr(TransformRep* ptr,
                                   internal::acquire_object_ref_t) {
    if (ptr) {
      ptr_ =
          TransformRep::Allocate(ptr->input_rank, ptr->output_rank).release();
      CopyTransformRep(ptr, ptr_);
    } else {
      ptr_ = nullptr;
    }
  }

  DeepCopyTransformRepPtr(DeepCopyTransformRepPtr&& other)
      : ptr_(std::exchange(other.ptr_, nullptr)) {}

  DeepCopyTransformRepPtr(const DeepCopyTransformRepPtr& other)
      : DeepCopyTransformRepPtr(other.ptr_, internal::acquire_object_ref) {}

  DeepCopyTransformRepPtr& operator=(DeepCopyTransformRepPtr&& other) {
    // Guard against self-move: without it, `Free()` would release `ptr_` and
    // `std::exchange` would then reinstall the dangling pointer, causing a
    // double free in the destructor.
    if (this == &other) return *this;
    if (ptr_) Free();
    ptr_ = std::exchange(other.ptr_, nullptr);
    return *this;
  }

  // Self-assignment safe: the deep copy is made before the existing
  // representation is released by the move-assignment above.
  DeepCopyTransformRepPtr& operator=(const DeepCopyTransformRepPtr& other) {
    return *this = DeepCopyTransformRepPtr(other.ptr_,
                                           internal::acquire_object_ref);
  }

  DeepCopyTransformRepPtr& operator=(std::nullptr_t) {
    if (ptr_) Free();
    ptr_ = nullptr;
    return *this;
  }

  ~DeepCopyTransformRepPtr() {
    if (ptr_) Free();
  }

  explicit operator bool() const { return static_cast<bool>(ptr_); }
  TransformRep* get() const { return ptr_; }
  TransformRep* operator->() const { return ptr_; }
  TransformRep& operator*() const { return *ptr_; }

  /// Relinquishes ownership and returns the raw pointer (may be null).
  TransformRep* release() { return std::exchange(ptr_, nullptr); }

 private:
  // Drops the reference held by `ptr_` by handing it to a temporary
  // reference-counted pointer.
  void Free() {
    TransformRep::Ptr<>(ptr_, internal::adopt_object_ref);
  }

  TransformRep* ptr_;
};
}
}
#endif | #include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal_index_space::DeepCopyTransformRepPtr;
using ::tensorstore::internal_index_space::TransformRep;
TEST(DeepCopyTransformRepPtr, DefaultConstruct) {
DeepCopyTransformRepPtr ptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, Nullptr) {
DeepCopyTransformRepPtr ptr = nullptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AdoptAllocateZero) {
auto ptr1 = TransformRep::Allocate(0, 0);
ptr1->input_rank = ptr1->output_rank = 0;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AcquireAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.get(), acquire_object_ref);
EXPECT_NE(ptr1.get(), ptr2.get());
EXPECT_EQ(7, ptr2->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, Release) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
auto ptr3 = ptr2.release();
EXPECT_EQ(ptr, ptr3);
TransformRep::Ptr<>(ptr3, adopt_object_ref);
}
TEST(DeepCopyTransformRepPtr, MoveConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, CopyConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = ptr2;
EXPECT_NE(ptr, ptr3.get());
EXPECT_TRUE(ptr2);
EXPECT_TRUE(ptr3);
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, AssignNullptr) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
ptr2 = nullptr;
EXPECT_EQ(nullptr, ptr2.get());
}
TEST(DeepCopyTransformRepPtr, MoveAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, MoveAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = std::move(ptr3);
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = ptr2;
EXPECT_TRUE(ptr2);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_NE(ptr, ptr3.get());
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = ptr3;
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
} | TransformRep* get() const { return ptr_; } | TEST(DeepCopyTransformRepPtr, DefaultConstruct) {
DeepCopyTransformRepPtr ptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, Nullptr) {
DeepCopyTransformRepPtr ptr = nullptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AdoptAllocateZero) {
auto ptr1 = TransformRep::Allocate(0, 0);
ptr1->input_rank = ptr1->output_rank = 0;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AcquireAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.get(), acquire_object_ref);
EXPECT_NE(ptr1.get(), ptr2.get());
EXPECT_EQ(7, ptr2->input_origin()[0]);
} |
#include "tensorflow/core/profiler/internal/tfprof_timeline.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
namespace tensorflow {
namespace tfprof {
namespace {

// Maximum number of individual tensor allocations shown in the "Top
// Allocations" counter track; the remainder is lumped into "Not Displayed".
constexpr int kMaxDisplayedMemNode = 10;

// Display name for a device's op lane group; devices whose name contains
// "stream" are labeled as execution threads, others as scheduling threads.
std::string GetTimeDevName(const std::string& dev) {
  if (dev.find("stream") != dev.npos) {
    return absl::StrCat("Op execution threads: ", dev);
  } else {
    return absl::StrCat("Op scheduling threads: ", dev);
  }
}

// Display name for a device's memory-usage lane group.
std::string GetMemoryLaneName(const std::string& dev) {
  return absl::StrCat("mem usage on:", dev);
}

}  // namespace
// Builds the common skeleton of a Chrome trace event: phase, category, name,
// process id, thread id, and timestamp. Callers attach event-specific fields
// (e.g. "dur", "args", "id") before storing it.
Json::Value ChromeTraceFormatter::CreateEvent(const string& ph,
                                              const string& category,
                                              const string& name, int64_t pid,
                                              int64_t tid, int64_t ts) {
  Json::Value evt(Json::objectValue);
  evt["ph"] = Json::Value(ph);
  evt["cat"] = Json::Value(category);
  evt["name"] = Json::Value(name);
  evt["pid"] = Json::Int64(pid);
  evt["tid"] = Json::Int64(tid);
  evt["ts"] = Json::Int64(ts);
  return evt;
}
void ChromeTraceFormatter::EmitPID(const string& name, int64_t pid) {
Json::Value event(Json::objectValue);
event["name"] = Json::Value("process_name");
event["ph"] = Json::Value("M");
event["pid"] = Json::Int64(pid);
Json::Value args(Json::objectValue);
args["name"] = Json::Value(name);
event["args"] = args;
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitRegion(int64_t ts, int64_t duration, int64_t pid,
int64_t tid, const string& category,
const string& name, Json::Value args) {
Json::Value event = CreateEvent("X", category, name, pid, tid, ts);
event["dur"] = Json::Int64(duration);
event["args"] = std::move(args);
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowStart(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("s", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowEnd(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("t", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
// Emits two counter ("C") events at time `ts`: on `pid`, the allocator's
// total bytes in use; on `pid + 1`, a breakdown of the largest individual
// tensor allocations (`tensor_mem` maps allocation size -> tensor names)
// that fit within that total.
void ChromeTraceFormatter::EmitCounter(
    const string& category, const string& name, int64_t pid, int64_t ts,
    const string& device, int64_t bytes,
    const std::map<int64_t, std::vector<string>>& tensor_mem) {
  Json::Value event = CreateEvent("C", category, "Allocated Bytes", pid, 0, ts);
  Json::Value args(Json::objectValue);
  args["Allocator Bytes in Use"] = Json::Int64(bytes);
  event["args"] = args;
  events_.push_back(event);
  Json::Value event2 =
      CreateEvent("C", category, "Top Allocations", pid + 1, 0, ts);
  Json::Value args2(Json::objectValue);
  // Pre-fill the slots with "N/A" so the viewer always shows a fixed set of
  // series. NOTE(review): this loop starts at 1 while `count` below starts
  // at 0, so slot 00 has no "N/A" default -- confirm intentional.
  for (int i = 1; i < kMaxDisplayedMemNode; ++i) {
    args2[absl::StrFormat("Top Allocation %02d", i)] = Json::Value("N/A");
  }
  // Walk allocations from largest to smallest, deducting each displayed
  // allocation from the remaining `bytes` budget; stop once an allocation
  // exceeds the budget or kMaxDisplayedMemNode entries are shown.
  int count = 0;
  for (auto it = tensor_mem.rbegin(); it != tensor_mem.rend(); ++it) {
    for (const string& t : it->second) {
      if (bytes < it->first || count >= kMaxDisplayedMemNode) {
        break;
      }
      args2[absl::StrFormat("Top Allocation %02d", count)] =
          Json::Value(absl::StrCat(it->first / 1000000.0, " MB from ", t));
      ++count;
      bytes -= it->first;
    }
  }
  // Whatever budget remains is reported as one aggregate entry.
  args2[std::string("Not Displayed")] =
      Json::Value(absl::StrFormat("%.2f MB", bytes / 1000000.0));
  event2["args"] = args2;
  events_.push_back(event2);
}
// Serializes all collected metadata and events into a JSON string in the
// Chrome trace-event format ({"traceEvents": [...]}), metadata first.
string ChromeTraceFormatter::Format() {
  Json::Value trace;
  trace["traceEvents"] = Json::Value(Json::arrayValue);
  for (const Json::Value& v : metadata_) {
    trace["traceEvents"].append(v);
  }
  for (const Json::Value& v : events_) {
    trace["traceEvents"].append(v);
  }
  Json::FastWriter writer;
  string trace_str = writer.write(trace);
  // Chrome's trace viewer struggles with very large files; warn so the user
  // can filter the profile down. (Message fixed: balanced the parenthesis
  // and corrected "Consider to use" -> "Consider using".)
  if (trace_str.length() > 200 * 1024 * 1024) {
    absl::FPrintF(stderr,
                  "Trace file is over 200MB. Chrome might not be able to "
                  "display it. Consider using filters (e.g. -min_micros "
                  "> 1000 or -op_type .*gpu:0.*) to reduce the size.\n");
  }
  return trace_str;
}
// Records one graph node's per-step memory activity into the per-device
// tracking structures: per-timestamp allocation deltas, the node's running
// allocation total, and allocator bytes-in-use snapshots.
void MemoryTracker::TrackNode(int64_t step, const GraphNode* node) {
  if (!node->Trackable(step)) {
    return;
  }
  Device& dev = devices_[node->node->canonical_device()];
  // Accumulate allocation deltas keyed by micro-timestamp, both per-node
  // and device-wide.
  std::map<int64_t, int64_t> allocs;
  for (const auto& alloc : node->node->allocations(step)) {
    allocs[alloc.alloc_micros()] += alloc.alloc_bytes();
    dev.tracked_allocations[alloc.alloc_micros()] += alloc.alloc_bytes();
  }
  // Persistent accelerator memory is booked at timestamp 0.
  dev.tracked_allocations[0] += node->node->accelerator_persistent_bytes();
  allocs[0] += node->node->accelerator_persistent_bytes();
  // Turn the per-timestamp deltas into a cumulative total for this tensor,
  // in timestamp order.
  int64_t last = 0;
  std::map<int64_t, int64_t>& aggregate_allocs =
      dev.tensor_allocs[node->name()];
  for (auto it = allocs.begin(); it != allocs.end(); ++it) {
    last += it->second;
    aggregate_allocs[it->first] = last;
  }
  // Snapshot allocator bytes-in-use; non-positive timestamps are skipped.
  for (const auto& bytes_in_use : node->node->allocator_bytes_in_use(step)) {
    if (bytes_in_use.first <= 0) continue;
    dev.allocations[bytes_in_use.first] = bytes_in_use.second;
  }
}
// Recursively creates TimeNodes for `gnode` and its shown children: one per
// (device, start time) kernel execution, allocating a trace process for each
// device the first time it is seen.
void Timeline::AllocateTimeNodes(GraphNode* gnode) {
  if (gnode->Trackable(step_)) {
    TrackNode(gnode);
    const TFGraphNode* node = gnode->node;
    for (const auto& kernel_execs : node->op_execs(step_)) {
      const string& device = kernel_execs.first;
      // First execution seen on this device: allocate a trace pid and emit
      // the process-name metadata for its lane group.
      if (process_.find(device) == process_.end()) {
        int64_t pid = AllocatePID();
        process_[device] = std::make_unique<Process>(device, pid);
        chrome_formatter_.EmitPID(GetTimeDevName(device), pid);
      }
      Process* p = process_[device].get();
      for (const auto& exec : kernel_execs.second) {
        int64_t start_micros = exec.first;
        int64_t exec_micros = exec.second;
        // Keep only the first TimeNode per start timestamp on each device.
        if (tnodes_[device].find(start_micros) == tnodes_[device].end()) {
          tnodes_[device][start_micros] =
              std::make_unique<TimeNode>(p, gnode, start_micros, exec_micros);
        }
      }
    }
  }
  for (GraphNode* n : gnode->show_children) {
    AllocateTimeNodes(n);
  }
}
// Builds the full graph-view timeline: creates TimeNodes for all shown
// graph nodes, links cross-device data-flow edges, assigns lanes, emits op
// regions and flow arrows, emits per-device memory counters, and finally
// writes the trace file.
void Timeline::GenerateGraphTimeline(const std::vector<GraphNode*>& gnodes) {
  for (GraphNode* gnode : gnodes) {
    AllocateTimeNodes(gnode);
  }
  // Link executions across devices: if an input of `tnode` ran on a
  // different canonical device, record `tnode` as a successor of that
  // input's execution so a flow arrow can be drawn between them.
  for (auto& process : tnodes_) {
    if (!IsCanonicalDevice(process.first)) continue;
    for (auto& tn : process.second) {
      TimeNode* tnode = tn.second.get();
      for (GraphNode* inp : tnode->node->children) {
        if (!inp->account || !inp->Trackable(step_)) {
          continue;
        }
        for (const auto& execs : inp->node->cpu_execs(step_)) {
          if (!IsCanonicalDevice(execs.first)) continue;
          if (process.first == execs.first) {
            // Same device: no cross-device flow arrow needed.
            continue;
          }
          for (const auto& exec : execs.second) {
            int64_t start_micros = exec.first;
            auto cprocess = tnodes_.find(execs.first);
            if (cprocess == tnodes_.end()) continue;
            auto ctn = cprocess->second.find(start_micros);
            if (ctn == cprocess->second.end()) continue;
            ctn->second->next_tnodes.push_back(tnode);
          }
        }
      }
    }
  }
  AllocateLanes();
  absl::FPrintF(stdout, "generating trace file.\n");
  // Emit one region per op execution, plus a flow start/end pair for every
  // recorded cross-device dependency.
  int64_t flow_id = 1;
  for (const auto& process : alloc_nodes_) {
    for (const auto& lane : process.second) {
      for (const auto& node : lane.second) {
        TimeNode* tnode = node.second;
        Json::Value args(Json::objectValue);
        args["name"] = Json::Value(tnode->name());
        chrome_formatter_.EmitRegion(node.first, tnode->exec_micros,
                                     process.first, lane.first, "Op",
                                     tnode->name(), args);
        for (TimeNode* next_tnode : node.second->next_tnodes) {
          chrome_formatter_.EmitFlowStart(
              tnode->name() + "_flow", tnode->start_micros + tnode->exec_micros,
              process.first, lane.first, flow_id);
          chrome_formatter_.EmitFlowEnd(
              tnode->name() + "_flow", next_tnode->start_micros,
              next_tnode->process->pid, next_tnode->tid, flow_id);
          flow_id += 1;
        }
      }
    }
  }
  // Emit memory counters for each non-CPU device: bytes in use over time,
  // plus a breakdown of the top tensor allocations, sampled at most once
  // per 100 microseconds.
  for (const auto& dev : mem_tracker_.devices()) {
    if (IsPlacedOnCPU(dev.first)) {
      continue;
    }
    int64_t pid = AllocatePID();
    chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first), pid);
    int64_t pid2 = AllocatePID();
    chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first) + " allocations",
                              pid2);
    const MemoryTracker::Device& device = dev.second;
    int64_t max_bytes_in_use = 0;
    int64_t cur_bytes_in_use = 0;
    int64_t last_point = 0;
    for (const auto& alloc : device.allocations) {
      cur_bytes_in_use = alloc.second;
      max_bytes_in_use = std::max(max_bytes_in_use, cur_bytes_in_use);
      int64_t ts = alloc.first;
      if (ts - last_point < 100) continue;
      last_point = ts;
      // For each tracked tensor, look up its cumulative allocation at `ts`
      // (the greatest entry at or before `ts`).
      std::map<int64_t, std::vector<string>> tensor_mem;
      for (const auto& tensor_alloc_it : dev.second.tensor_allocs) {
        const auto& tensor_alloc = tensor_alloc_it.second;
        auto it = tensor_alloc.lower_bound(ts);
        if (it != tensor_alloc.begin()) {
          --it;
        }
        if (it->second > 0) {
          tensor_mem[it->second].push_back(tensor_alloc_it.first);
        }
      }
      chrome_formatter_.EmitCounter("Memory", "Memory Series", pid, ts,
                                    dev.first, cur_bytes_in_use, tensor_mem);
    }
    if (IsPlacedOnAccelerator(dev.first)) {
      absl::FPrintF(stdout, "%s peak memory: %.2f MB\n", dev.first,
                    max_bytes_in_use / 1000000.0);
    }
  }
  OutputTimeline();
}
// Renders a scope-view tree as a nested timeline and writes the trace file.
void Timeline::GenerateScopeTimeline(const ScopeNode* node) {
  std::set<int64_t> visited_depth;
  const int64_t total_micros = node->proto().total_exec_micros();
  EmitTreeNode(node, 0, total_micros, 0, &visited_depth);
  OutputTimeline();
}
// Renders a code-view tree as a nested timeline and writes the trace file.
void Timeline::GenerateCodeTimeline(const CodeNode* node) {
  std::set<int64_t> visited_depth;
  const int64_t total_micros = node->proto().total_exec_micros();
  EmitTreeNode(node, 0, total_micros, 0, &visited_depth);
  OutputTimeline();
}
// Writes the formatted Chrome trace to "<outfile_>_<step>", printing usage
// instructions to stdout on success or the failure reason to stderr.
void Timeline::OutputTimeline() {
  std::string outfile = absl::StrFormat("%s_%d", outfile_, step());
  Status s =
      WriteStringToFile(Env::Default(), outfile, chrome_formatter_.Format());
  if (!s.ok()) {
    absl::FPrintF(stderr, "Failed to write timeline file: %s\nError: %s\n",
                  outfile, s.ToString());
    return;
  }
  absl::FPrintF(stdout,
                "\n******************************************************\n");
  // The "chrome://tracing" URL below was truncated at "chrome:" (the rest
  // was evidently stripped as a // comment), leaving an unterminated string
  // literal; restored here.
  absl::FPrintF(stdout,
                "Timeline file is written to %s.\n"
                "Open a Chrome browser, enter URL chrome://tracing and "
                "load the timeline file.",
                outfile);
  absl::FPrintF(stdout,
                "\n******************************************************\n");
  fflush(stdout);
}
// Assigns each time node of every process to a display lane (the trace
// "tid") such that nodes sharing a lane never overlap in time. Each lane is
// a start->end interval map.
void Timeline::AllocateLanes() {
  for (auto& process : tnodes_) {
    Process* p = process_[process.first].get();
    for (auto& tnode : process.second) {
      int64_t start_time = tnode.second->start_micros;
      int64_t end_time = tnode.second->start_micros + tnode.second->exec_micros;
      // Index of the first lane that can accept [start_time, end_time),
      // or -1 if none found.
      int64_t l = -1;
      for (int64_t i = 0, end = p->lanes.size(); i < end; ++i) {
        const auto& lane = p->lanes[i];
        l = i;
        // Scan the lane's intervals from the latest start backwards.
        for (auto cur_it = lane.rbegin(); cur_it != lane.rend(); ++cur_it) {
          if (cur_it->second > start_time) {
            // An existing interval ends after our start: lane is occupied.
            l = -1;
            break;
          }
          if (start_time > cur_it->second) {
            // This (and every earlier) interval ends before our start:
            // lane is free.
            break;
          }
        }
        if (l >= 0) {
          break;
        }
      }
      if (l < 0) {
        // No existing lane fits: open a new one seeded with this interval.
        l = p->lanes.size();
        std::map<int64_t, int64_t> nlane;
        nlane[start_time] = end_time;
        p->lanes.push_back(nlane);
      } else {
        p->lanes[l][start_time] = end_time;
      }
      tnode.second->tid = l;
      alloc_nodes_[p->pid][l][start_time] = tnode.second.get();
    }
  }
}
// Returns a fresh trace process id and advances the counter.
int64_t Timeline::AllocatePID() { return next_pid_++; }
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTimelineTest : public ::testing::Test {
protected:
TFProfTimelineTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
tf_stats_ = std::make_unique<TFStats>(
std::move(graph_pb), std::move(run_meta_pb), nullptr, nullptr);
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfTimelineTest, GraphView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(10000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("graph", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(16556121177519539380ull, Hash64(dump_str));
}
TEST_F(TFProfTimelineTest, ScopeView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(17545174915963890413ull, Hash64(dump_str));
}
}
} | void Timeline::GenerateScopeTimeline(const ScopeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
} | TEST_F(TFProfTimelineTest, GraphView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(10000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("graph", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(16556121177519539380ull, Hash64(dump_str));
}
TEST_F(TFProfTimelineTest, ScopeView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(17545174915963890413ull, Hash64(dump_str));
} |
#include "runtime/standard/time_functions.h"
#include <functional>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/overflow.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
// Resolves `tz` (empty => the default zone; then a named zone; then a fixed
// "HH:MM"-style offset) and stores the civil-time breakdown of `timestamp`
// into `*breakdown`. Returns InvalidArgumentError if `tz` is unrecognized.
absl::Status FindTimeBreakdown(absl::Time timestamp, absl::string_view tz,
                               absl::TimeZone::CivilInfo* breakdown) {
  absl::TimeZone zone;
  if (tz.empty()) {
    *breakdown = zone.At(timestamp);
    return absl::OkStatus();
  }
  // Try an IANA zone name, e.g. "America/New_York".
  if (absl::LoadTimeZone(tz, &zone)) {
    *breakdown = zone.At(timestamp);
    return absl::OkStatus();
  }
  // Fall back to a fixed offset: rewrite "HH:MM" as a duration string
  // ("HHhMMm") and shift the timestamp directly, keeping the default zone.
  if (absl::StrContains(tz, ":")) {
    std::string offset_str = absl::StrCat(tz, "m");
    absl::StrReplaceAll({{":", "h"}}, &offset_str);
    absl::Duration offset;
    if (absl::ParseDuration(offset_str, &offset)) {
      *breakdown = zone.At(timestamp + offset);
      return absl::OkStatus();
    }
  }
  return absl::InvalidArgumentError("Invalid timezone");
}
// Computes the civil-time breakdown of (timestamp, tz) and applies
// `extractor_func` to it, returning the extracted field as an IntValue, or
// an ErrorValue if the time zone could not be resolved.
Value GetTimeBreakdownPart(
    ValueManager& value_factory, absl::Time timestamp, absl::string_view tz,
    const std::function<int64_t(const absl::TimeZone::CivilInfo&)>&
        extractor_func) {
  absl::TimeZone::CivilInfo breakdown;
  if (auto status = FindTimeBreakdown(timestamp, tz, &breakdown);
      !status.ok()) {
    return value_factory.CreateErrorValue(status);
  }
  return value_factory.CreateIntValue(extractor_func(breakdown));
}
// Returns the civil year of `timestamp` in time zone `tz`.
Value GetFullYear(ValueManager& value_factory, absl::Time timestamp,
                  absl::string_view tz) {
  auto extract_year = [](const absl::TimeZone::CivilInfo& info) {
    return info.cs.year();
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_year);
}
// Returns the zero-based month (0 = January) of `timestamp` in zone `tz`.
Value GetMonth(ValueManager& value_factory, absl::Time timestamp,
               absl::string_view tz) {
  auto extract_month = [](const absl::TimeZone::CivilInfo& info) {
    // Civil months are 1-12; shift to the 0-11 range.
    return info.cs.month() - 1;
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_month);
}
// Returns the zero-based day of year (0 = January 1st) of `timestamp` in
// zone `tz`.
Value GetDayOfYear(ValueManager& value_factory, absl::Time timestamp,
                   absl::string_view tz) {
  auto extract_yday = [](const absl::TimeZone::CivilInfo& info) {
    // absl::GetYearDay is 1-based; shift to 0-based.
    return absl::GetYearDay(absl::CivilDay(info.cs)) - 1;
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_yday);
}
// Returns the zero-based day of month of `timestamp` in zone `tz`.
Value GetDayOfMonth(ValueManager& value_factory, absl::Time timestamp,
                    absl::string_view tz) {
  auto extract_mday = [](const absl::TimeZone::CivilInfo& info) {
    // Civil days are 1-based; shift to 0-based.
    return info.cs.day() - 1;
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_mday);
}
// Returns the one-based day of month of `timestamp` in zone `tz`.
Value GetDate(ValueManager& value_factory, absl::Time timestamp,
              absl::string_view tz) {
  auto extract_date = [](const absl::TimeZone::CivilInfo& info) {
    return info.cs.day();
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_date);
}
// Returns the day of week of `timestamp` in zone `tz`, with Sunday = 0.
Value GetDayOfWeek(ValueManager& value_factory, absl::Time timestamp,
                   absl::string_view tz) {
  auto extract_weekday = [](const absl::TimeZone::CivilInfo& info) {
    // absl::Weekday enumerates Monday=0..Sunday=6; rotate the numbering so
    // that Sunday=0..Saturday=6.
    int day_num = static_cast<int>(absl::GetWeekday(info.cs));
    return (day_num == 6) ? 0 : day_num + 1;
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_weekday);
}
// Returns the hour (0-23) of `timestamp` in zone `tz`.
Value GetHours(ValueManager& value_factory, absl::Time timestamp,
               absl::string_view tz) {
  auto extract_hour = [](const absl::TimeZone::CivilInfo& info) {
    return info.cs.hour();
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_hour);
}
// Returns the minute (0-59) of `timestamp` in zone `tz`.
Value GetMinutes(ValueManager& value_factory, absl::Time timestamp,
                 absl::string_view tz) {
  auto extract_minute = [](const absl::TimeZone::CivilInfo& info) {
    return info.cs.minute();
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_minute);
}
// Returns the second (0-59) of `timestamp` in zone `tz`.
Value GetSeconds(ValueManager& value_factory, absl::Time timestamp,
                 absl::string_view tz) {
  auto extract_second = [](const absl::TimeZone::CivilInfo& info) {
    return info.cs.second();
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_second);
}
// Returns the subsecond component of `timestamp` in zone `tz`, truncated to
// whole milliseconds.
Value GetMilliseconds(ValueManager& value_factory, absl::Time timestamp,
                      absl::string_view tz) {
  auto extract_millis = [](const absl::TimeZone::CivilInfo& info) {
    return absl::ToInt64Milliseconds(info.subsecond);
  };
  return GetTimeBreakdownPart(value_factory, timestamp, tz, extract_millis);
}
// Registers the receiver-style timestamp accessor builtins (getFullYear,
// getMonth, getDayOfYear, getDayOfMonth, getDate, getDayOfWeek, getHours,
// getMinutes, getSeconds, getMilliseconds). Each accessor gets two overloads:
// (timestamp, string) taking a time-zone name, and (timestamp) alone, which
// forwards an empty time-zone string to the Get* helper (zone interpretation
// happens inside those helpers).
// NOTE(review): `options` is currently unused here — presumably kept so all
// Register* functions share a uniform signature; confirm before removing.
absl::Status RegisterTimestampFunctions(FunctionRegistry& registry,
                                        const RuntimeOptions& options) {
  // ts.getFullYear(tz) / ts.getFullYear()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kFullYear, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetFullYear(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kFullYear, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetFullYear(value_factory, ts, "");
          })));
  // ts.getMonth(tz) / ts.getMonth()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kMonth, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetMonth(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kMonth,
                                                                true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetMonth(value_factory, ts, "");
          })));
  // ts.getDayOfYear(tz) / ts.getDayOfYear()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kDayOfYear, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetDayOfYear(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kDayOfYear, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetDayOfYear(value_factory, ts, "");
          })));
  // ts.getDayOfMonth(tz) / ts.getDayOfMonth()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kDayOfMonth, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetDayOfMonth(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kDayOfMonth, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetDayOfMonth(value_factory, ts, "");
          })));
  // ts.getDate(tz) / ts.getDate()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kDate, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetDate(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kDate,
                                                                true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetDate(value_factory, ts, "");
          })));
  // ts.getDayOfWeek(tz) / ts.getDayOfWeek()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kDayOfWeek, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetDayOfWeek(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kDayOfWeek, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetDayOfWeek(value_factory, ts, "");
          })));
  // ts.getHours(tz) / ts.getHours()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kHours, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetHours(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kHours,
                                                                true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetHours(value_factory, ts, "");
          })));
  // ts.getMinutes(tz) / ts.getMinutes()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kMinutes, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetMinutes(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kMinutes, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetMinutes(value_factory, ts, "");
          })));
  // ts.getSeconds(tz) / ts.getSeconds()
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kSeconds, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetSeconds(value_factory, ts, tz.ToString());
          })));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kSeconds, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetSeconds(value_factory, ts, "");
          })));
  // ts.getMilliseconds(tz) / ts.getMilliseconds(); the final registration's
  // status is returned directly instead of going through CEL_RETURN_IF_ERROR.
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          CreateDescriptor(builtin::kMilliseconds, true),
      BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
          WrapFunction([](ValueManager& value_factory, absl::Time ts,
                          const StringValue& tz) -> Value {
            return GetMilliseconds(value_factory, ts, tz.ToString());
          })));
  return registry.Register(
      UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
          builtin::kMilliseconds, true),
      UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time ts) -> Value {
            return GetMilliseconds(value_factory, ts, "");
          }));
}
// Registers overflow-checked `+` and `-` for timestamp/duration operands.
// Every overload funnels through cel::internal::CheckedAdd/CheckedSub and
// turns an out-of-range result into an error value instead of wrapping.
absl::Status RegisterCheckedTimeArithmeticFunctions(
    FunctionRegistry& registry) {
  // timestamp + duration
  // NOTE(review): this descriptor is built from BinaryFunctionAdapter<Value,
  // ...> while every sibling uses <absl::StatusOr<Value>, ...>; presumably the
  // descriptor only depends on the argument kinds, so they are equivalent —
  // confirm before normalizing.
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time,
                            absl::Duration>::CreateDescriptor(builtin::kAdd,
                                                              false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Time t1,
                          absl::Duration d2) -> absl::StatusOr<Value> {
            auto sum = cel::internal::CheckedAdd(t1, d2);
            if (!sum.ok()) {
              return value_factory.CreateErrorValue(sum.status());
            }
            return value_factory.CreateTimestampValue(*sum);
          })));
  // duration + timestamp (commuted operand order, same checked sum)
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
                            absl::Time>::CreateDescriptor(builtin::kAdd, false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration, absl::Time>::
          WrapFunction([](ValueManager& value_factory, absl::Duration d2,
                          absl::Time t1) -> absl::StatusOr<Value> {
            auto sum = cel::internal::CheckedAdd(t1, d2);
            if (!sum.ok()) {
              return value_factory.CreateErrorValue(sum.status());
            }
            return value_factory.CreateTimestampValue(*sum);
          })));
  // duration + duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
                            absl::Duration>::CreateDescriptor(builtin::kAdd,
                                                              false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
                            absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Duration d1,
                          absl::Duration d2) -> absl::StatusOr<Value> {
            auto sum = cel::internal::CheckedAdd(d1, d2);
            if (!sum.ok()) {
              return value_factory.CreateErrorValue(sum.status());
            }
            return value_factory.CreateDurationValue(*sum);
          })));
  // timestamp - duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
          CreateDescriptor(builtin::kSubtract, false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Time t1,
                          absl::Duration d2) -> absl::StatusOr<Value> {
            auto diff = cel::internal::CheckedSub(t1, d2);
            if (!diff.ok()) {
              return value_factory.CreateErrorValue(diff.status());
            }
            return value_factory.CreateTimestampValue(*diff);
          })));
  // timestamp - timestamp -> duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time,
                            absl::Time>::CreateDescriptor(builtin::kSubtract,
                                                          false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Time>::
          WrapFunction([](ValueManager& value_factory, absl::Time t1,
                          absl::Time t2) -> absl::StatusOr<Value> {
            auto diff = cel::internal::CheckedSub(t1, t2);
            if (!diff.ok()) {
              return value_factory.CreateErrorValue(diff.status());
            }
            return value_factory.CreateDurationValue(*diff);
          })));
  // duration - duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<
          absl::StatusOr<Value>, absl::Duration,
          absl::Duration>::CreateDescriptor(builtin::kSubtract, false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
                            absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Duration d1,
                          absl::Duration d2) -> absl::StatusOr<Value> {
            auto diff = cel::internal::CheckedSub(d1, d2);
            if (!diff.ok()) {
              return value_factory.CreateErrorValue(diff.status());
            }
            return value_factory.CreateDurationValue(*diff);
          })));
  return absl::OkStatus();
}
// Registers `+` and `-` for timestamp/duration operands using plain absl
// arithmetic (no overflow detection) via the CreateUnchecked*Value factories.
// Used when overflow errors are disabled in RuntimeOptions.
absl::Status RegisterUncheckedTimeArithmeticFunctions(
    FunctionRegistry& registry) {
  // timestamp + duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time,
                            absl::Duration>::CreateDescriptor(builtin::kAdd,
                                                              false),
      BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::WrapFunction(
          [](ValueManager& value_factory, absl::Time t1,
             absl::Duration d2) -> Value {
            return value_factory.CreateUncheckedTimestampValue(t1 + d2);
          })));
  // duration + timestamp
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Duration,
                            absl::Time>::CreateDescriptor(builtin::kAdd, false),
      BinaryFunctionAdapter<Value, absl::Duration, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Duration d2,
             absl::Time t1) -> Value {
            return value_factory.CreateUncheckedTimestampValue(t1 + d2);
          })));
  // duration + duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Duration,
                            absl::Duration>::CreateDescriptor(builtin::kAdd,
                                                              false),
      BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Duration d1,
                          absl::Duration d2) -> Value {
            return value_factory.CreateUncheckedDurationValue(d1 + d2);
          })));
  // timestamp - duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::
          CreateDescriptor(builtin::kSubtract, false),
      BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::WrapFunction(
          [](ValueManager& value_factory, absl::Time t1,
             absl::Duration d2) -> Value {
            return value_factory.CreateUncheckedTimestampValue(t1 - d2);
          })));
  // timestamp - timestamp -> duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Time, absl::Time>::CreateDescriptor(
          builtin::kSubtract, false),
      BinaryFunctionAdapter<Value, absl::Time, absl::Time>::WrapFunction(
          [](ValueManager& value_factory, absl::Time t1,
             absl::Time t2) -> Value {
            return value_factory.CreateUncheckedDurationValue(t1 - t2);
          })));
  // duration - duration
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
          CreateDescriptor(builtin::kSubtract, false),
      BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
          WrapFunction([](ValueManager& value_factory, absl::Duration d1,
                          absl::Duration d2) -> Value {
            return value_factory.CreateUncheckedDurationValue(d1 - d2);
          })));
  return absl::OkStatus();
}
// Registers the receiver-style duration accessors getHours, getMinutes,
// getSeconds and getMilliseconds, each mapping a duration to an int64.
absl::Status RegisterDurationFunctions(FunctionRegistry& registry) {
  // All four accessors share the same unary int64(duration) adapter shape.
  using DurationAccessorFunction =
      UnaryFunctionAdapter<int64_t, absl::Duration>;
  // d.getHours(): whole hours in the duration.
  CEL_RETURN_IF_ERROR(registry.Register(
      DurationAccessorFunction::CreateDescriptor(builtin::kHours, true),
      DurationAccessorFunction::WrapFunction(
          [](ValueManager&, absl::Duration d) -> int64_t {
            return absl::ToInt64Hours(d);
          })));
  // d.getMinutes(): whole minutes in the duration.
  CEL_RETURN_IF_ERROR(registry.Register(
      DurationAccessorFunction::CreateDescriptor(builtin::kMinutes, true),
      DurationAccessorFunction::WrapFunction(
          [](ValueManager&, absl::Duration d) -> int64_t {
            return absl::ToInt64Minutes(d);
          })));
  // d.getSeconds(): whole seconds in the duration.
  CEL_RETURN_IF_ERROR(registry.Register(
      DurationAccessorFunction::CreateDescriptor(builtin::kSeconds, true),
      DurationAccessorFunction::WrapFunction(
          [](ValueManager&, absl::Duration d) -> int64_t {
            return absl::ToInt64Seconds(d);
          })));
  // d.getMilliseconds(): only the sub-second millisecond component (the
  // modulo discards whole seconds), not the total duration in milliseconds.
  return registry.Register(
      DurationAccessorFunction::CreateDescriptor(builtin::kMilliseconds, true),
      DurationAccessorFunction::WrapFunction(
          [](ValueManager&, absl::Duration d) -> int64_t {
            constexpr int64_t millis_per_second = 1000L;
            return absl::ToInt64Milliseconds(d) % millis_per_second;
          }));
}
}
// Registers every time-related builtin: timestamp accessors, duration
// accessors, and +/- arithmetic. Whether arithmetic is overflow-checked is
// selected by `options.enable_timestamp_duration_overflow_errors`.
absl::Status RegisterTimeFunctions(FunctionRegistry& registry,
                                   const RuntimeOptions& options) {
  CEL_RETURN_IF_ERROR(RegisterTimestampFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterDurationFunctions(registry));
  return options.enable_timestamp_duration_overflow_errors
             ? RegisterCheckedTimeArithmeticFunctions(registry)
             : RegisterUncheckedTimeArithmeticFunctions(registry);
}
} | #include "runtime/standard/time_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::UnorderedElementsAre;
// Matches a FunctionDescriptor* describing a global (non-receiver) binary
// operator `name` whose argument kinds are exactly
// {expected_kind1, expected_kind2}.
MATCHER_P3(MatchesOperatorDescriptor, name, expected_kind1, expected_kind2,
           "") {
  const FunctionDescriptor& descriptor = *arg;
  std::vector<Kind> types{expected_kind1, expected_kind2};
  return descriptor.name() == name && descriptor.receiver_style() == false &&
         descriptor.types() == types;
}
// Matches a FunctionDescriptor* describing a receiver-style accessor `name`
// taking a single argument of the given kind (the receiver).
MATCHER_P2(MatchesTimeAccessor, name, kind, "") {
  const FunctionDescriptor& descriptor = *arg;
  std::vector<Kind> types{kind};
  return descriptor.name() == name && descriptor.receiver_style() == true &&
         descriptor.types() == types;
}
// Matches a FunctionDescriptor* describing a receiver-style accessor `name`
// that additionally takes a string time-zone argument.
MATCHER_P2(MatchesTimezoneTimeAccessor, name, kind, "") {
  const FunctionDescriptor& descriptor = *arg;
  std::vector<Kind> types{kind, Kind::kString};
  return descriptor.name() == name && descriptor.receiver_style() == true &&
         descriptor.types() == types;
}
// Verifies that exactly the expected +/- overloads over timestamps and
// durations are registered (and no others) under default options.
TEST(RegisterTimeFunctions, MathOperatorsRegistered) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTimeFunctions(registry, options));
  auto registered_functions = registry.ListFunctions();
  EXPECT_THAT(registered_functions[builtin::kAdd],
              UnorderedElementsAre(
                  MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
                                            Kind::kDuration),
                  MatchesOperatorDescriptor(builtin::kAdd, Kind::kTimestamp,
                                            Kind::kDuration),
                  MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
                                            Kind::kTimestamp)));
  EXPECT_THAT(registered_functions[builtin::kSubtract],
              UnorderedElementsAre(
                  MatchesOperatorDescriptor(builtin::kSubtract, Kind::kDuration,
                                            Kind::kDuration),
                  MatchesOperatorDescriptor(builtin::kSubtract,
                                            Kind::kTimestamp, Kind::kDuration),
                  MatchesOperatorDescriptor(
                      builtin::kSubtract, Kind::kTimestamp, Kind::kTimestamp)));
}
// Verifies each accessor builtin has the expected overload set: every one on
// timestamp (with and without a time-zone string), and the hour/minute/
// second/millisecond accessors additionally on duration.
TEST(RegisterTimeFunctions, AccessorsRegistered) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTimeFunctions(registry, options));
  auto registered_functions = registry.ListFunctions();
  EXPECT_THAT(
      registered_functions[builtin::kFullYear],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kFullYear, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kFullYear, Kind::kTimestamp)));
  EXPECT_THAT(
      registered_functions[builtin::kDate],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kDate, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kDate, Kind::kTimestamp)));
  EXPECT_THAT(
      registered_functions[builtin::kMonth],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kMonth, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kMonth, Kind::kTimestamp)));
  EXPECT_THAT(
      registered_functions[builtin::kDayOfYear],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp)));
  EXPECT_THAT(
      registered_functions[builtin::kDayOfMonth],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp)));
  EXPECT_THAT(
      registered_functions[builtin::kDayOfWeek],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp)));
  // The remaining accessors also apply to durations (unary only).
  EXPECT_THAT(
      registered_functions[builtin::kHours],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kHours, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kHours, Kind::kTimestamp),
          MatchesTimeAccessor(builtin::kHours, Kind::kDuration)));
  EXPECT_THAT(
      registered_functions[builtin::kMinutes],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
          MatchesTimeAccessor(builtin::kMinutes, Kind::kDuration)));
  EXPECT_THAT(
      registered_functions[builtin::kSeconds],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
          MatchesTimeAccessor(builtin::kSeconds, Kind::kDuration)));
  EXPECT_THAT(
      registered_functions[builtin::kMilliseconds],
      UnorderedElementsAre(
          MatchesTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
          MatchesTimezoneTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
          MatchesTimeAccessor(builtin::kMilliseconds, Kind::kDuration)));
}
}
} | absl::Status RegisterTimeFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(RegisterTimestampFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterDurationFunctions(registry));
if (options.enable_timestamp_duration_overflow_errors) {
return RegisterCheckedTimeArithmeticFunctions(registry);
}
return RegisterUncheckedTimeArithmeticFunctions(registry);
} | TEST(RegisterTimeFunctions, MathOperatorsRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTimeFunctions(registry, options));
auto registered_functions = registry.ListFunctions();
EXPECT_THAT(registered_functions[builtin::kAdd],
UnorderedElementsAre(
MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kAdd, Kind::kTimestamp,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
Kind::kTimestamp)));
EXPECT_THAT(registered_functions[builtin::kSubtract],
UnorderedElementsAre(
MatchesOperatorDescriptor(builtin::kSubtract, Kind::kDuration,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kSubtract,
Kind::kTimestamp, Kind::kDuration),
MatchesOperatorDescriptor(
builtin::kSubtract, Kind::kTimestamp, Kind::kTimestamp)));
}
TEST(RegisterTimeFunctions, AccessorsRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTimeFunctions(registry, options));
auto registered_functions = registry.ListFunctions();
EXPECT_THAT(
registered_functions[builtin::kFullYear],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kFullYear, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kFullYear, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDate],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDate, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDate, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kMonth],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMonth, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMonth, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfYear],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfMonth],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfWeek],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kHours],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kHours, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kHours, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kHours, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kMinutes],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kMinutes, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kSeconds],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kSeconds, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kMilliseconds],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kMilliseconds, Kind::kDuration)));
} |
#include "tensorflow/lite/simple_planner.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
// Sentinel node index meaning "this tensor is never (de)allocated by a node".
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
}
// Constructs a planner over `graph_info`. `context` is used only for error
// reporting (TF_LITE_ENSURE) in the planning routines.
SimplePlanner::SimplePlanner(TfLiteContext* context,
                             std::unique_ptr<GraphInfo> graph_info)
    : context_(context), graph_info_(std::move(graph_info)) {}
// Releases every buffer the planner handed out.
SimplePlanner::~SimplePlanner() { FreeAllAllocations(); }
void SimplePlanner::FreeAllAllocations() {
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
allocs_[i].free();
}
}
// Drops every outstanding buffer and rebuilds the allocation table with one
// fresh (empty) record per tensor in the current graph.
TfLiteStatus SimplePlanner::ResetAllocations() {
  FreeAllAllocations();
  const size_t tensor_count = graph_info_->num_tensors();
  allocs_.clear();
  allocs_.resize(tensor_count);
  return kTfLiteOk;
}
// Releases buffers of arena-RW tensors first allocated by a node that runs
// strictly after `node`; everything allocated at or before `node`, and all
// non-arena-RW tensors, is left intact.
TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
  TfLiteTensor* tensors = graph_info_->tensors();
  const int record_count = static_cast<int>(allocs_.size());
  for (int tensor_index = 0; tensor_index < record_count; ++tensor_index) {
    // Skip records allocated at/before `node` or never allocated at all.
    if (allocs_[tensor_index].node <= node ||
        allocs_[tensor_index].size <= 0) {
      continue;
    }
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      allocs_[tensor_index].free();
      tensor.data.raw = nullptr;
    }
  }
  return kTfLiteOk;
}
// Computes, for every tensor, the first node that needs it live (alloc_node_)
// and the node after which it can be freed (dealloc_node_), via reference
// counting over the execution graph. No buffers are allocated here.
TfLiteStatus SimplePlanner::PlanAllocations() {
  // Start from a clean slate: one unassigned record per tensor.
  TF_LITE_ENSURE_STATUS(ResetAllocations());
  alloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
  dealloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
  // refcounts[t] = number of pending consumers of tensor t.
  std::vector<int> refcounts(graph_info_->num_tensors(), 0);
  // Records the first node at which `tensor` must be live; idempotent, and
  // rejects tensors already scheduled for deallocation.
  auto allocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] != kNodeNotAssigned) {
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    alloc_node_[tensor] = node;
    return kTfLiteOk;
  };
  // Records the node after which `tensor` may be freed; a no-op for tensors
  // that were never allocated.
  auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] == kNodeNotAssigned) {
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    dealloc_node_[tensor] = node;
    return kTfLiteOk;
  };
  // Graph outputs get an extra reference that is never consumed, so they are
  // never deallocated.
  for (int tensor_index : graph_info_->outputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      refcounts[tensor_index]++;
    }
  }
  // Variables persist across invocations: pin and allocate them up front.
  for (int tensor_index : graph_info_->variables()) {
    refcounts[tensor_index]++;
    TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
    TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
  }
  // Graph inputs are live from node 0.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      refcounts[tensor_index]++;
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    }
  }
  // First pass: count each node input as a pending use.
  const size_t num_execution_nodes = graph_info_->num_execution_nodes();
  for (size_t i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        refcounts[tensor_index]++;
      }
    }
  }
  // Second pass: in execution order, allocate outputs at the producing node
  // and deallocate inputs whose last consumer has just run.
  for (size_t i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_outputs = node.outputs;
    for (int j = 0; j < node_outputs->size; ++j) {
      int tensor_index = node_outputs->data[j];
      TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
    }
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        refcounts[tensor_index]--;
        if (refcounts[tensor_index] == 0) {
          TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
        }
      }
    }
  }
  return kTfLiteOk;
}
// Materializes buffers for tensors whose planned lifetime starts within
// [first_node, last_node], following the plan from PlanAllocations().
TfLiteStatus SimplePlanner::ExecuteAllocations(int first_node, int last_node) {
  // The tensor count may have grown since planning (e.g. new temporaries);
  // extend the bookkeeping without disturbing existing entries.
  alloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
  dealloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
  allocs_.resize(graph_info_->num_tensors());
  // Node temporaries live only within their own node: alloc == dealloc node.
  const size_t num_execution_nodes = graph_info_->num_execution_nodes();
  for (size_t i = first_node;
       i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int j = 0; j < node_temporaries->size; ++j) {
      int tensor_index = node_temporaries->data[j];
      alloc_node_[tensor_index] = i;
      dealloc_node_[tensor_index] = i;
    }
  }
  const int num_tensors = static_cast<int>(graph_info_->num_tensors());
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < num_tensors; ++i) {
    bool allocated = false;
    if (alloc_node_[i] >= first_node && alloc_node_[i] <= last_node) {
      TfLiteTensor& tensor = tensors[i];
      if (tensor.allocation_type == kTfLiteArenaRw) {
        // Arena-RW buffers are recreated every time this range is executed.
        if (allocs_[i].size != 0) {
          allocs_[i].free();
        }
        allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
      } else if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
                 allocs_[i].size == 0) {
        // Persistent buffers are allocated once and then reused.
        allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
      }
    }
    // Wire the tensor's data pointer only for freshly created buffers.
    if (allocated) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
    }
  }
  return kTfLiteOk;
}
// Frees every arena-RW buffer and nulls out the owning tensor's data pointer.
// Persistent and statically allocated tensors are untouched.
TfLiteStatus SimplePlanner::ReleaseNonPersistentMemory() {
  TfLiteTensor* tensors = graph_info_->tensors();
  const int tensor_count = static_cast<int>(graph_info_->num_tensors());
  for (int tensor_index = 0; tensor_index < tensor_count; ++tensor_index) {
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type != kTfLiteArenaRw) {
      continue;
    }
    allocs_[tensor_index].free();
    tensor.data.raw = nullptr;
  }
  return kTfLiteOk;
}
// Re-wires every arena-RW tensor's data pointer to its current allocation
// record (counterpart of ReleaseNonPersistentMemory()).
TfLiteStatus SimplePlanner::AcquireNonPersistentMemory() {
  TfLiteTensor* tensors = graph_info_->tensors();
  const int tensor_count = static_cast<int>(graph_info_->num_tensors());
  for (int tensor_index = 0; tensor_index < tensor_count; ++tensor_index) {
    if (tensors[tensor_index].allocation_type != kTfLiteArenaRw) {
      continue;
    }
    TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(tensor_index));
  }
  return kTfLiteOk;
}
// Points a tensor at its backing buffer. Arena-RW tensors are only wired up
// when a buffer actually exists (zero-sized records leave data untouched);
// persistent tensors always take whatever pointer the record holds.
TfLiteStatus SimplePlanner::ResolveTensorAllocation(int tensor_index) {
  TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
  const auto& record = allocs_[tensor_index];
  if (tensor.allocation_type == kTfLiteArenaRw) {
    if (record.size != 0) {
      tensor.data.raw = record.ptr;
    }
  } else if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    tensor.data.raw = record.ptr;
  }
  return kTfLiteOk;
}
} | #include "tensorflow/lite/simple_planner.h"
#include <algorithm>
#include <cstdarg>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
// A lightweight node stub for building test graphs: it simply records which
// tensor indices the node reads, writes, and uses as scratch space, plus a
// default-constructed registration.
class TestOp {
 public:
  TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
         std::initializer_list<int> temporaries)
      : input_tensors_(inputs),
        output_tensors_(outputs),
        temp_tensors_(temporaries) {}

  const std::vector<int>& inputs() const { return input_tensors_; }
  const std::vector<int>& outputs() const { return output_tensors_; }
  const std::vector<int>& temporaries() const { return temp_tensors_; }
  const TfLiteRegistration& registration() const { return registration_; }

 private:
  std::vector<int> input_tensors_;
  std::vector<int> output_tensors_;
  std::vector<int> temp_tensors_;
  TfLiteRegistration registration_{};
};
// Builds an in-memory TfLite graph from TestOp descriptions for planner
// tests. Owns the node int-arrays and a tensor table sized to the largest
// tensor index mentioned anywhere in inputs/outputs/temporaries.
class TestGraph {
 public:
  TestGraph(std::initializer_list<int> inputs,
            std::initializer_list<TestOp> nodes,
            std::initializer_list<int> outputs)
      : inputs_(inputs), outputs_(outputs) {
    // Track the largest tensor index seen so the tensor table covers it.
    int max_tensor_index = 0;
    for (int t : inputs) {
      max_tensor_index = std::max(max_tensor_index, t);
    }
    for (int t : outputs) {
      max_tensor_index = std::max(max_tensor_index, t);
    }
    for (const auto& node : nodes) {
      // Copies a std::vector<int> into a freshly allocated TfLiteIntArray
      // (freed in the destructor).
      auto int_array = [](const std::vector<int>& x) {
        TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
        for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
        return lite;
      };
      registrations_.push_back(node.registration());
      nodes_.push_back(TfLiteNode());
      nodes_.back().inputs = int_array(node.inputs());
      for (int t : node.inputs()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
      nodes_.back().outputs = int_array(node.outputs());
      for (int t : node.outputs()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
      nodes_.back().temporaries = int_array(node.temporaries());
      for (int t : node.temporaries()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
    }
    // All tensors are arena-RW with a distinct, nonzero size ((i + 1) * 3)
    // so individual allocations are distinguishable in tests.
    for (int i = 0; i <= max_tensor_index; ++i) {
      tensors_.push_back(TfLiteTensor());
      tensors_.back().allocation_type = kTfLiteArenaRw;
      tensors_.back().bytes = (i + 1) * 3;
    }
  }
  ~TestGraph() {
    // Release the int-arrays created by int_array() above.
    for (auto node : nodes_) {
      TfLiteIntArrayFree(node.inputs);
      TfLiteIntArrayFree(node.outputs);
      TfLiteIntArrayFree(node.temporaries);
    }
  }
  const std::vector<TfLiteNode>& nodes() { return nodes_; }
  std::vector<TfLiteTensor>* tensors() { return &tensors_; }
  const std::vector<int>& inputs() { return inputs_; }
  const std::vector<int>& outputs() { return outputs_; }
  const std::vector<int>& variables() { return variables_; }
  const std::vector<TfLiteRegistration>& registrations() {
    return registrations_;
  }
  void SetVariables(const std::vector<int>& variables) {
    variables_ = variables;
  }
  // Exchanges all graph state with `other` (used to simulate graph swaps).
  void Swap(TestGraph* other) {
    std::swap(nodes_, other->nodes_);
    std::swap(tensors_, other->tensors_);
    std::swap(inputs_, other->inputs_);
    std::swap(outputs_, other->outputs_);
    std::swap(variables_, other->variables_);
  }
 private:
  std::vector<TfLiteNode> nodes_;
  std::vector<TfLiteTensor> tensors_;
  std::vector<TfLiteRegistration> registrations_;
  std::vector<int> inputs_;
  std::vector<int> outputs_;
  std::vector<int> variables_;
};
// Thin GraphInfo adapter over a TestGraph (non-owning); node indices map to
// themselves since the test graph has no delegation/partitioning.
class TestGraphInfo : public GraphInfo {
 public:
  explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
  size_t num_tensors() const override { return graph_->tensors()->size(); }
  const TfLiteRegistration& registration(size_t index) const override {
    return graph_->registrations()[index];
  }
  TfLiteTensor* tensor(size_t index) override {
    return &graph_->tensors()->at(index);
  }
  TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
  size_t num_execution_nodes() const override { return graph_->nodes().size(); }
  size_t num_total_nodes() const override { return graph_->nodes().size(); }
  const TfLiteNode& node(size_t index) const override {
    return graph_->nodes()[index];
  }
  size_t node_index(size_t index) const override { return index; }
  const std::vector<int>& inputs() const override { return graph_->inputs(); }
  const std::vector<int>& outputs() const override { return graph_->outputs(); }
  const std::vector<int>& variables() const override {
    return graph_->variables();
  }
 private:
  TestGraph* graph_;  // Not owned; must outlive this adapter.
};
// TfLiteContext error callback for tests: formats the message into a fixed
// buffer and routes it to LOG(INFO) rather than failing the test.
void ReportError(TfLiteContext* context, const char* format, ...) {
  constexpr size_t kBufferSize = 1024;
  char message[kBufferSize];
  va_list args;
  va_start(args, format);
  vsnprintf(message, kBufferSize, format, args);
  va_end(args);
  LOG(INFO) << message;
}
// Fixture that wires a TestGraph into a SimplePlanner and exposes thin
// wrappers (with CHECKed statuses) around the planner API.
class SimplePlannerTest : public ::testing::Test {
 protected:
  // Builds a planner over `graph` and runs planning immediately.
  // NOTE(review): `preserve_all_tensors` is unused here — presumably kept for
  // signature parity with the arena-planner test fixture; confirm.
  void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
    graph_ = graph;
    context_.ReportError = ReportError;
    planner_ = std::make_unique<SimplePlanner>(
        &context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)));
    CHECK(planner_->ResetAllocations() == kTfLiteOk);
    CHECK(planner_->PlanAllocations() == kTfLiteOk);
  }
  // Swaps graph contents in place and replans (planner keeps its GraphInfo).
  void SwapGraph(TestGraph* graph) {
    graph_->Swap(graph);
    CHECK(planner_->PlanAllocations() == kTfLiteOk);
  }
  void Execute(int start, int end) {
    CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
  }
  void ReleaseNonPersistentMemory() {
    CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
  }
  void AcquireNonPersistentMemory() {
    CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
  }
  void ResetAllocationsAfter(int node) {
    CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
  }
  bool HasNonPersistentMemory() {
    return planner_ && planner_->HasNonPersistentMemory();
  }
  // A tensor counts as allocated iff its data pointer has been resolved.
  bool IsAllocated(int tensor_index) {
    return (*graph_->tensors())[tensor_index].data.raw != nullptr;
  }
  TfLiteContext context_;
  TestGraph* graph_;
  std::unique_ptr<SimplePlanner> planner_;
};
// Planning and executing an empty graph must succeed and allocate nothing.
TEST_F(SimplePlannerTest, EmptyGraph) {
  TestGraph graph({}, {}, {});
  SetGraph(&graph);
  Execute(0, 10);
}
// With no ops, output tensors (5, 11) are never produced, so they must stay
// unallocated even after execution.
TEST_F(SimplePlannerTest, GraphWithNoOps) {
  TestGraph graph({0, 10}, {}, {5, 11});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_FALSE(IsAllocated(5));
  EXPECT_FALSE(IsAllocated(11));
}
// A zero-byte tensor (1) is skipped by allocation while the nonzero output
// tensor (2) is still allocated.
TEST_F(SimplePlannerTest, ZeroSizedTensors) {
  TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
  (*graph.tensors())[1].bytes = 0;
  SetGraph(&graph);
  ASSERT_EQ(planner_->ExecuteAllocations(0, 10), kTfLiteOk);
  EXPECT_FALSE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
}
// Three-node chain: every intermediate and output tensor ends up allocated.
TEST_F(SimplePlannerTest, SimpleGraph) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_TRUE(IsAllocated(3));
  EXPECT_TRUE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
}
// Same chain as SimpleGraph; the simple planner never reclaims input tensors,
// so all tensors remain allocated after execution.
TEST_F(SimplePlannerTest, SimpleGraphInputsPreserved) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_TRUE(IsAllocated(3));
  EXPECT_TRUE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
}
// Node 1 declares tensor 5 as a temporary; temporaries get allocated like
// regular intermediates under the simple planner.
TEST_F(SimplePlannerTest, SimpleGraphWithTemporary) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_TRUE(IsAllocated(3));
  EXPECT_TRUE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
}
// ResetAllocationsAfter(0) must free only tensors produced after node 0
// (3, 4, 5) while leaving node-0 inputs/outputs (0, 1, 2) allocated.
TEST_F(SimplePlannerTest, SimpleGraphWithResetAllocationsAfter) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_TRUE(IsAllocated(3));
  EXPECT_TRUE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
  ResetAllocationsAfter(0);
  EXPECT_TRUE(IsAllocated(0));
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_FALSE(IsAllocated(3));
  EXPECT_FALSE(IsAllocated(4));
  EXPECT_FALSE(IsAllocated(5));
}
// An arena-persistent tensor (5) must survive ResetAllocationsAfter and keep
// the same buffer pointer across a re-execution.
TEST_F(SimplePlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  (*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
  SetGraph(&graph);
  Execute(0, 10);
  // Capture the persistent buffer so we can verify it is stable.
  void* tensor5_ptr = (*graph.tensors())[5].data.raw;
  ResetAllocationsAfter(0);
  EXPECT_TRUE(IsAllocated(0));
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_FALSE(IsAllocated(3));
  EXPECT_FALSE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
  Execute(0, 10);
  EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
// An optional (-1) entry in the output list must be tolerated by planning.
TEST_F(SimplePlannerTest, SimpleGraphOptionalOutput) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {-1, 3});
  SetGraph(&graph);
  Execute(0, 10);
  EXPECT_TRUE(IsAllocated(1));
  EXPECT_TRUE(IsAllocated(2));
  EXPECT_TRUE(IsAllocated(3));
  EXPECT_TRUE(IsAllocated(4));
  EXPECT_TRUE(IsAllocated(5));
}
}
} | TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
}
return kTfLiteOk;
} | TEST_F(SimplePlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_FALSE(IsAllocated(5));
} |
#include "xla/service/map_inliner.h"
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
// DFS visitor that inlines trivial map() calls within one computation.
// Only HandleMap does real work; every other instruction is a no-op.
class MapInlinerVisitor : public DfsHloVisitorWithDefault {
 public:
  explicit MapInlinerVisitor(HloComputation* computation)
      : computation_(computation) {}
  // Non-map instructions are left untouched.
  absl::Status DefaultAction(HloInstruction* ) override {
    return absl::OkStatus();
  }
  absl::Status HandleMap(HloInstruction* map) override;
  // Visits `computation`; returns true if any map was inlined.
  absl::StatusOr<bool> Run(HloComputation* computation);
 private:
  HloComputation* computation_;  // computation currently being rewritten
  bool changed_ = false;         // set by HandleMap on successful inlining
};
// Re-targets the visitor at `computation`, walks it from the root, and
// reports whether HandleMap changed anything.
absl::StatusOr<bool> MapInlinerVisitor::Run(HloComputation* computation) {
  changed_ = false;
  computation_ = computation;
  TF_RETURN_IF_ERROR(computation->root_instruction()->Accept(this));
  return changed_;
}
// Inlines map({X ... Y}, f) as f(X ... Y) when f's root consumes only
// parameters. Three shapes are handled: root is itself a parameter (forward
// the matching operand), root is a constant (broadcast it to the map's
// shape), or root is a plain op (clone it with the map's operands). Fusion
// roots are skipped.
absl::Status MapInlinerVisitor::HandleMap(HloInstruction* map) {
  HloComputation* function = map->to_apply();
  HloInstruction& root = *function->root_instruction();
  if (hlo_query::AllOperandsAreParameters(root)) {
    if (root.opcode() == HloOpcode::kFusion) {
      // Cloning a fusion into another computation is not supported here.
      return absl::OkStatus();
    }
    VLOG(10) << "inlining map({X ... Y}, op) => : op(X ... Y) with function "
             << root.ToShortString();
    if (root.opcode() == HloOpcode::kParameter) {
      // Degenerate map: the function returns one of its inputs unchanged, so
      // the map is replaced by the corresponding operand and removed.
      TF_RETURN_IF_ERROR(
          map->ReplaceAllUsesWith(map->operands()[root.parameter_number()]));
      TF_RETURN_IF_ERROR(computation_->RemoveInstruction(map));
    } else if (root.opcode() == HloOpcode::kConstant) {
      // Constant function: materialize the constant in this computation and
      // broadcast it to the map's output shape.
      HloInstruction* constant = computation_->AddInstruction(root.Clone());
      HloInstruction* placed_instruction = computation_->AddInstruction(
          HloInstruction::CreateBroadcast(map->shape(), constant, {}));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    } else {
      // General case: rebuild the root op against the map's operands, pairing
      // each function parameter with the map operand at the same position.
      std::vector<HloInstruction*> params;
      for (int64_t o = 0; o < root.operands().size(); o++) {
        params.push_back(map->operands()[root.operand(o)->parameter_number()]);
      }
      HloInstruction* placed_instruction = computation_->AddInstruction(
          root.CloneWithNewOperands(map->shape(), params));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    }
    changed_ = true;
    return absl::OkStatus();
  }
  return absl::OkStatus();
}
// Runs the map-inlining visitor over every computation in `module` that
// belongs to `execution_threads`; returns true if any map was inlined.
absl::StatusOr<bool> MapInliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // The visitor is re-targeted per computation inside its Run().
  MapInlinerVisitor visitor(nullptr);
  bool any_changed = false;
  for (HloComputation* comp : module->computations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(const bool comp_changed, visitor.Run(comp));
    any_changed = any_changed || comp_changed;
  }
  return any_changed;
}
} | #include "xla/service/map_inliner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using MapInlinerTest = HloTestBase;
// map({a, b}, max) must inline to a plain maximum over the two constants,
// both structurally (root matcher) and numerically (execution result).
TEST_F(MapInlinerTest, MapMax) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto max_builder = HloComputation::Builder(TestName());
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kMaximum, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapMaxFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Maximum(lhs, rhs));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// A map whose function ignores its parameter and returns a constant must
// inline to broadcast(constant).
TEST_F(MapInlinerTest, MapConstant) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto const2_builder = HloComputation::Builder(TestName());
  auto param1 = const2_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  (void)param1;
  const2_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
  auto const2_f32 = const2_builder.Build();
  auto builder = HloComputation::Builder("MapConstFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(const2_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  root = hlo_module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::Broadcast(op::Constant()));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// The mapped function declares its parameters in reverse order (parameter 1
// first); inlining must honor parameter numbers, producing subtract(rhs, lhs).
TEST_F(MapInlinerTest, MapSubtractOppositeOrder) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto max_builder = HloComputation::Builder(TestName());
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kSubtract, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapSubFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Subtract(rhs, lhs));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// A map whose function simply returns its second parameter must inline to
// that operand directly (here, the constant 4).
TEST_F(MapInlinerTest, MapParameter) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto param_builder = HloComputation::Builder(TestName());
  param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "p0"));
  param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "p1"));
  auto param_f32 = param_builder.Build();
  auto builder = HloComputation::Builder("MapParamFunction");
  auto lhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
  auto rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4)));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(param_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR0<float>(4);
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
}
} | absl::StatusOr<bool> MapInliner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
MapInlinerVisitor visitor(nullptr);
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool computation_changed, visitor.Run(computation));
changed |= computation_changed;
}
return changed;
} | TEST_F(MapInlinerTest, MapMax) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto max_builder = HloComputation::Builder(TestName());
auto param1 = max_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto param2 = max_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "y"));
max_builder.AddInstruction(HloInstruction::CreateBinary(
param1->shape(), HloOpcode::kMaximum, param1, param2));
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapMaxFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(max_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
op::Maximum(lhs, rhs));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapConstant) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto const2_builder = HloComputation::Builder(TestName());
auto param1 = const2_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
(void)param1;
const2_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto const2_f32 = const2_builder.Build();
auto builder = HloComputation::Builder("MapConstFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(const2_f32));
hlo_module->AddEntryComputation(std::move(computation));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
root = hlo_module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Broadcast(op::Constant()));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapSubtractOppositeOrder) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto max_builder = HloComputation::Builder(TestName());
auto param1 = max_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "x"));
auto param2 = max_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "y"));
max_builder.AddInstruction(HloInstruction::CreateBinary(
param1->shape(), HloOpcode::kSubtract, param1, param2));
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapSubFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(max_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
op::Subtract(rhs, lhs));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapParameter) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto param_builder = HloComputation::Builder(TestName());
param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "p0"));
param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "p1"));
auto param_f32 = param_builder.Build();
auto builder = HloComputation::Builder("MapParamFunction");
auto lhs = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
auto rhs = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4)));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(param_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR0<float>(4);
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
} |
#include "absl/strings/charconv.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <system_error>
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/internal/charconv_bigint.h"
#include "absl/strings/internal/charconv_parse.h"
#ifdef ABSL_BIT_PACK_FLOATS
#error ABSL_BIT_PACK_FLOATS cannot be directly set
#elif defined(__x86_64__) || defined(_M_X64)
#define ABSL_BIT_PACK_FLOATS 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
template <typename FloatType>
struct FloatTraits;
// Format parameters and helpers for IEEE-754 double (binary64).
template <>
struct FloatTraits<double> {
  using mantissa_t = uint64_t;
  static constexpr int kTargetBits = 64;
  static constexpr int kTargetExponentBits = 11;
  static constexpr int kTargetMantissaBits = 53;
  // Binary-exponent range accepted by Make() for an integer-scaled mantissa.
  static constexpr int kMaxExponent = 971;
  static constexpr int kMinNormalExponent = -1074;
  static constexpr int kExponentBias = 1023;
  // Eisel-Lemire fast-path parameters: shift/mask applied to the high half of
  // the 128-bit product, and the decimal-exponent range the path handles.
  static constexpr int kEiselLemireShift = 9;
  static constexpr uint64_t kEiselLemireMask = uint64_t{0x1FF};
  static constexpr int kEiselLemireMinInclusiveExp10 = -324 - 18;
  static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
  // Builds a NaN carrying `tagp` as payload, via the builtin when available.
  static double MakeNan(absl::Nonnull<const char*> tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nan)
    return __builtin_nan(tagp);
#else
    using namespace std;
    return nan(tagp);
#endif
  }
  // Assembles mantissa * 2^exponent (negated when `sign` is set), either via
  // ldexp or, on x86-64, by packing the IEEE bit pattern directly.
  static double Make(mantissa_t mantissa, int exponent, bool sign) {
#ifndef ABSL_BIT_PACK_FLOATS
    using namespace std;
    return sign ? -ldexp(mantissa, exponent) : ldexp(mantissa, exponent);
#else
    constexpr uint64_t kMantissaMask =
        (uint64_t{1} << (kTargetMantissaBits - 1)) - 1;
    uint64_t dbl = static_cast<uint64_t>(sign) << 63;
    if (mantissa > kMantissaMask) {
      // Normal value: drop the implicit leading bit, fill the exponent field.
      dbl += static_cast<uint64_t>(exponent + 1023 + kTargetMantissaBits - 1)
             << 52;
      mantissa &= kMantissaMask;
    } else {
      // Subnormal: exponent field stays zero.
      assert(exponent == kMinNormalExponent);
    }
    dbl += mantissa;
    return absl::bit_cast<double>(dbl);
#endif
  }
};
// Format parameters and helpers for IEEE-754 float (binary32).
template <>
struct FloatTraits<float> {
  using mantissa_t = uint32_t;
  static constexpr int kTargetBits = 32;
  static constexpr int kTargetExponentBits = 8;
  static constexpr int kTargetMantissaBits = 24;
  // Binary-exponent range accepted by Make() for an integer-scaled mantissa.
  static constexpr int kMaxExponent = 104;
  static constexpr int kMinNormalExponent = -149;
  static constexpr int kExponentBias = 127;
  // Eisel-Lemire fast-path parameters (see the double specialization).
  static constexpr int kEiselLemireShift = 38;
  static constexpr uint64_t kEiselLemireMask = uint64_t{0x3FFFFFFFFF};
  static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18;
  static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
  // Builds a NaN carrying `tagp` as payload, via the builtin when available.
  static float MakeNan(absl::Nonnull<const char*> tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nanf)
    return __builtin_nanf(tagp);
#else
    using namespace std;
    return std::nanf(tagp);
#endif
  }
  // Assembles mantissa * 2^exponent (negated when `sign` is set), either via
  // ldexpf or, on x86-64, by packing the IEEE bit pattern directly.
  static float Make(mantissa_t mantissa, int exponent, bool sign) {
#ifndef ABSL_BIT_PACK_FLOATS
    using namespace std;
    return sign ? -ldexpf(mantissa, exponent) : ldexpf(mantissa, exponent);
#else
    constexpr uint32_t kMantissaMask =
        (uint32_t{1} << (kTargetMantissaBits - 1)) - 1;
    uint32_t flt = static_cast<uint32_t>(sign) << 31;
    if (mantissa > kMantissaMask) {
      // Normal value: drop the implicit leading bit, fill the exponent field.
      flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
             << 23;
      mantissa &= kMantissaMask;
    } else {
      // Subnormal: exponent field stays zero.
      assert(exponent == kMinNormalExponent);
    }
    flt += mantissa;
    return absl::bit_cast<float>(flt);
#endif
  }
};
extern const uint64_t kPower10MantissaHighTable[];
extern const uint64_t kPower10MantissaLowTable[];
constexpr int kPower10TableMinInclusive = -342;
constexpr int kPower10TableMaxExclusive = 309;
// Returns the precomputed high-64-bit mantissa for 10**n. Requires
// kPower10TableMinInclusive <= n < kPower10TableMaxExclusive.
uint64_t Power10Mantissa(int n) {
  return kPower10MantissaHighTable[n - kPower10TableMinInclusive];
}
// Binary exponent paired with Power10Mantissa(n): integer approximation of
// floor(n * log2(10)) - 63, using 217706/2^16 ~= log2(10).
int Power10Exponent(int n) {
  const int scaled = 217706 * n;
  return (scaled >> 16) - 63;
}
// True if 10**n is above the precomputed table's range.
bool Power10Overflow(int n) { return n >= kPower10TableMaxExclusive; }
// True if 10**n is below the precomputed table's range.
bool Power10Underflow(int n) { return n < kPower10TableMinInclusive; }
// True if the stored mantissa for 10**n is exact rather than truncated.
bool Power10Exact(int n) { return n >= 0 && n <= 27; }
// Sentinel exponent values marking out-of-range results.
constexpr int kOverflow = 99999;
constexpr int kUnderflow = -99999;
// A mantissa/exponent pair; exponent may be kOverflow or kUnderflow.
struct CalculatedFloat {
  uint64_t mantissa = 0;
  int exponent = 0;
};
// Number of significant bits in a 128-bit value (0 for value == 0).
int BitWidth(uint128 value) {
  if (Uint128High64(value) == 0) {
    return static_cast<int>(bit_width(Uint128Low64(value)));
  }
  return 128 - countl_zero(Uint128High64(value));
}
// Right-shift needed to narrow a `mantissa_width`-bit mantissa to the target
// type's mantissa width, clamped up so the resulting exponent never drops
// below the minimum (subnormal) exponent.
template <typename FloatType>
int NormalizedShiftSize(int mantissa_width, int binary_exponent) {
  const int normal_shift =
      mantissa_width - FloatTraits<FloatType>::kTargetMantissaBits;
  const int minimum_shift =
      FloatTraits<FloatType>::kMinNormalExponent - binary_exponent;
  return std::max(normal_shift, minimum_shift);
}
// Shifts *value right so it occupies exactly `bit_width` bits, returning the
// shift applied so the caller can adjust the binary exponent.
// NOTE(review): assumes *value is at least `bit_width` bits wide — a negative
// shift here would be undefined behavior; confirm callers guarantee this.
int TruncateToBitWidth(int bit_width, absl::Nonnull<uint128*> value) {
  const int current_bit_width = BitWidth(*value);
  const int shift = current_bit_width - bit_width;
  *value >>= shift;
  return shift;
}
// Handles NaN, infinity, and zero inputs directly. Returns true (with *value
// set) when `input` was one of those edge cases; false means the caller must
// do a full conversion.
template <typename FloatType>
bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
                    absl::Nonnull<FloatType*> value) {
  if (input.type == strings_internal::FloatType::kNan) {
    // Copy the NaN payload into a NUL-terminated buffer for MakeNan.
    constexpr ptrdiff_t kNanBufferSize = 128;
    // `volatile` works around a miscompile on GCC and pre-7 Clang —
    // presumably an optimizer issue with this buffer; see the #if condition.
#if (defined(__GNUC__) && !defined(__clang__)) || \
    (defined(__clang__) && __clang_major__ < 7)
    volatile char n_char_sequence[kNanBufferSize];
#else
    char n_char_sequence[kNanBufferSize];
#endif
    if (input.subrange_begin == nullptr) {
      n_char_sequence[0] = '\0';
    } else {
      // Truncate the payload to fit the buffer, leaving room for the NUL.
      ptrdiff_t nan_size = input.subrange_end - input.subrange_begin;
      nan_size = std::min(nan_size, kNanBufferSize - 1);
      std::copy_n(input.subrange_begin, nan_size, n_char_sequence);
      n_char_sequence[nan_size] = '\0';
    }
    char* nan_argument = const_cast<char*>(n_char_sequence);
    *value = negative ? -FloatTraits<FloatType>::MakeNan(nan_argument)
                      : FloatTraits<FloatType>::MakeNan(nan_argument);
    return true;
  }
  if (input.type == strings_internal::FloatType::kInfinity) {
    *value = negative ? -std::numeric_limits<FloatType>::infinity()
                      : std::numeric_limits<FloatType>::infinity();
    return true;
  }
  if (input.mantissa == 0) {
    // Signed zero: preserve the parsed sign.
    *value = negative ? -0.0 : 0.0;
    return true;
  }
  return false;
}
// Converts a CalculatedFloat into the caller-visible result: overflow yields
// +-max with result_out_of_range, underflow/zero yields +-0 with
// result_out_of_range, otherwise the value is assembled via FloatTraits.
template <typename FloatType>
void EncodeResult(const CalculatedFloat& calculated, bool negative,
                  absl::Nonnull<absl::from_chars_result*> result,
                  absl::Nonnull<FloatType*> value) {
  if (calculated.exponent == kOverflow) {
    result->ec = std::errc::result_out_of_range;
    *value = negative ? -std::numeric_limits<FloatType>::max()
                      : std::numeric_limits<FloatType>::max();
    return;
  } else if (calculated.mantissa == 0 || calculated.exponent == kUnderflow) {
    result->ec = std::errc::result_out_of_range;
    *value = negative ? -0.0 : 0.0;
    return;
  }
  *value = FloatTraits<FloatType>::Make(
      static_cast<typename FloatTraits<FloatType>::mantissa_t>(
          calculated.mantissa),
      calculated.exponent, negative);
}
// Shifts `value` right by `shift` bits with round-half-to-even, reporting in
// *output_exact whether the result is exact. A non-positive shift is a left
// shift (always exact iff the input was). `input_exact` tells us whether
// `value` itself was already a truncation of the true value.
uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
                            absl::Nonnull<bool*> output_exact) {
  if (shift <= 0) {
    *output_exact = input_exact;
    return static_cast<uint64_t>(value << -shift);
  }
  if (shift >= 128) {
    // Every bit is shifted out; the result is zero.
    *output_exact = true;
    return 0;
  }
  *output_exact = true;
  const uint128 shift_mask = (uint128(1) << shift) - 1;
  const uint128 halfway_point = uint128(1) << (shift - 1);
  const uint128 shifted_bits = value & shift_mask;
  value >>= shift;
  if (shifted_bits > halfway_point) {
    // More than half a ULP was shifted out: round up.
    return static_cast<uint64_t>(value + 1);
  }
  if (shifted_bits == halfway_point) {
    // Exactly half (as far as we know): round to even, except that an inexact
    // input means the true value is slightly above half, so round up.
    if ((value & 1) == 1 || !input_exact) {
      ++value;
    }
    return static_cast<uint64_t>(value);
  }
  if (!input_exact && shifted_bits == halfway_point - 1) {
    // Truncated input just below half: the true value might straddle the
    // halfway point, so the rounded result cannot be called exact.
    *output_exact = false;
  }
  return static_cast<uint64_t>(value);
}
// Slow-path tiebreaker: decides whether the approximate result
// guess_mantissa * 2^guess_exponent must be rounded up, by comparing the
// exact decimal value against the halfway point (2*guess + 1 at exponent-1)
// using arbitrary-precision integers.
bool MustRoundUp(uint64_t guess_mantissa, int guess_exponent,
                 const strings_internal::ParsedFloat& parsed_decimal) {
  // 768 decimal digits suffice to represent any binary64 value exactly.
  absl::strings_internal::BigUnsigned<84> exact_mantissa;
  int exact_exponent = exact_mantissa.ReadFloatMantissa(parsed_decimal, 768);
  // Form the halfway point between guess and guess+1.
  guess_mantissa = guess_mantissa * 2 + 1;
  guess_exponent -= 1;
  absl::strings_internal::BigUnsigned<84>& lhs = exact_mantissa;
  int comparison;
  if (exact_exponent >= 0) {
    // exact = mantissa * 5^e * 2^e: fold the 5^e into the left side.
    lhs.MultiplyByFiveToTheNth(exact_exponent);
    absl::strings_internal::BigUnsigned<84> rhs(guess_mantissa);
    // Align the remaining powers of two on whichever side needs them.
    if (exact_exponent > guess_exponent) {
      lhs.ShiftLeft(exact_exponent - guess_exponent);
    } else {
      rhs.ShiftLeft(guess_exponent - exact_exponent);
    }
    comparison = Compare(lhs, rhs);
  } else {
    // Negative decimal exponent: move the 5^-e factor onto the right side.
    absl::strings_internal::BigUnsigned<84> rhs =
        absl::strings_internal::BigUnsigned<84>::FiveToTheNth(-exact_exponent);
    rhs.MultiplyBy(guess_mantissa);
    if (exact_exponent > guess_exponent) {
      lhs.ShiftLeft(exact_exponent - guess_exponent);
    } else {
      rhs.ShiftLeft(guess_exponent - exact_exponent);
    }
    comparison = Compare(lhs, rhs);
  }
  if (comparison < 0) {
    return false;
  } else if (comparison > 0) {
    return true;
  } else {
    // Exactly halfway: round to even. Bit 1 of (2*guess + 1) is the original
    // guess's low bit, so round up only when the guess was odd.
    return (guess_mantissa & 2) == 2;
  }
}
// Packages a rounded mantissa/exponent into a CalculatedFloat, normalizing a
// rounding carry-out (mantissa == 2^kTargetMantissaBits) and mapping
// out-of-range exponents to the kOverflow/kUnderflow sentinels.
template <typename FloatType>
CalculatedFloat CalculatedFloatFromRawValues(uint64_t mantissa, int exponent) {
  CalculatedFloat result;
  if (mantissa == uint64_t{1} << FloatTraits<FloatType>::kTargetMantissaBits) {
    // Rounding overflowed the mantissa width: renormalize.
    mantissa >>= 1;
    exponent += 1;
  }
  if (exponent > FloatTraits<FloatType>::kMaxExponent) {
    result.exponent = kOverflow;
  } else if (mantissa == 0) {
    result.exponent = kUnderflow;
  } else {
    result.exponent = exponent;
    result.mantissa = mantissa;
  }
  return result;
}
// Converts a parsed hexadecimal float. Hex mantissas are exact, so a single
// shift-and-round to the target mantissa width is sufficient.
template <typename FloatType>
CalculatedFloat CalculateFromParsedHexadecimal(
    const strings_internal::ParsedFloat& parsed_hex) {
  uint64_t mantissa = parsed_hex.mantissa;
  int exponent = parsed_hex.exponent;
  // The mantissa is nonzero here (zero is handled by HandleEdgeCase).
  int mantissa_width = static_cast<int>(bit_width(mantissa));
  const int shift = NormalizedShiftSize<FloatType>(mantissa_width, exponent);
  bool result_exact;
  exponent += shift;
  mantissa = ShiftRightAndRound(mantissa, shift,
                                true, &result_exact);
  return CalculatedFloatFromRawValues<FloatType>(mantissa, exponent);
}
// Converts a parsed decimal float: multiply the decimal mantissa by a
// precomputed power-of-ten mantissa into a 128-bit product, shift/round to
// the target width, and fall back to the exact big-integer comparison
// (MustRoundUp) whenever the rounded result might be off by one ULP.
template <typename FloatType>
CalculatedFloat CalculateFromParsedDecimal(
    const strings_internal::ParsedFloat& parsed_decimal) {
  CalculatedFloat result;
  if (Power10Underflow(parsed_decimal.exponent)) {
    result.exponent = kUnderflow;
    return result;
  } else if (Power10Overflow(parsed_decimal.exponent)) {
    result.exponent = kOverflow;
    return result;
  }
  uint128 wide_binary_mantissa = parsed_decimal.mantissa;
  wide_binary_mantissa *= Power10Mantissa(parsed_decimal.exponent);
  int binary_exponent = Power10Exponent(parsed_decimal.exponent);
  bool mantissa_exact;
  int mantissa_width;
  if (parsed_decimal.subrange_begin) {
    // Mantissa was truncated during parsing: keep 58 bits and mark inexact.
    mantissa_width = 58;
    mantissa_exact = false;
    binary_exponent +=
        TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
  } else if (!Power10Exact(parsed_decimal.exponent)) {
    // The power-of-ten table entry itself was truncated: 63 bits, inexact.
    mantissa_width = 63;
    mantissa_exact = false;
    binary_exponent +=
        TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
  } else {
    // Both factors exact: the full product is exact.
    mantissa_width = BitWidth(wide_binary_mantissa);
    mantissa_exact = true;
  }
  const int shift =
      NormalizedShiftSize<FloatType>(mantissa_width, binary_exponent);
  bool result_exact;
  binary_exponent += shift;
  uint64_t binary_mantissa = ShiftRightAndRound(wide_binary_mantissa, shift,
                                                mantissa_exact, &result_exact);
  if (!result_exact) {
    // Ambiguous rounding: resolve with the exact big-integer comparison.
    if (MustRoundUp(binary_mantissa, binary_exponent, parsed_decimal)) {
      binary_mantissa += 1;
    }
  }
  return CalculatedFloatFromRawValues<FloatType>(binary_mantissa,
                                                 binary_exponent);
}
template <typename FloatType>
bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
absl::Nonnull<FloatType*> value,
absl::Nonnull<std::errc*> ec) {
uint64_t man = input.mantissa;
int exp10 = input.exponent;
if (exp10 < FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10) {
*value = negative ? -0.0 : 0.0;
*ec = std::errc::result_out_of_range;
return true;
} else if (exp10 >= FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10) {
*value = negative ? -std::numeric_limits<FloatType>::max()
: std::numeric_limits<FloatType>::max();
*ec = std::errc::result_out_of_range;
return true;
}
static_assert(
FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10 >=
kPower10TableMinInclusive,
"(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
static_assert(
FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10 <=
kPower10TableMaxExclusive,
"(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
int clz = countl_zero(man);
man <<= static_cast<unsigned int>(clz);
uint64_t ret_exp2 =
static_cast<uint64_t>((217706 * exp10 >> 16) + 64 +
FloatTraits<FloatType>::kExponentBias - clz);
uint128 x = static_cast<uint128>(man) *
static_cast<uint128>(
kPower10MantissaHighTable[exp10 - kPower10TableMinInclusive]);
static constexpr uint64_t high64_mask =
FloatTraits<FloatType>::kEiselLemireMask;
if (((Uint128High64(x) & high64_mask) == high64_mask) &&
(man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(x)))) {
uint128 y =
static_cast<uint128>(man) *
static_cast<uint128>(
kPower10MantissaLowTable[exp10 - kPower10TableMinInclusive]);
x += Uint128High64(y);
if (((Uint128High64(x) & high64_mask) == high64_mask) &&
((Uint128Low64(x) + 1) == 0) &&
(man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(y)))) {
return false;
}
}
uint64_t msb = Uint128High64(x) >> 63;
uint64_t ret_man =
Uint128High64(x) >> (msb + FloatTraits<FloatType>::kEiselLemireShift);
ret_exp2 -= 1 ^ msb;
if ((Uint128Low64(x) == 0) && ((Uint128High64(x) & high64_mask) == 0) &&
((ret_man & 3) == 1)) {
return false;
}
ret | #include "absl/strings/charconv.h"
#include <cfloat>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <limits>
#include <string>
#include <system_error>
#include "gtest/gtest.h"
#include "absl/strings/internal/pow10_helper.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
// MSVC's toolchain historically neither rounds decimal floating-point
// literals exactly nor preserves nan(n-char-sequence) payloads in strtod,
// so tests below that rely on those behaviors are compiled out there.
// ABSL_COMPILER_DOES_EXACT_ROUNDING guards tests that compare parsed values
// against compiled literals; ABSL_STRTOD_HANDLES_NAN_CORRECTLY guards
// bitwise comparisons against strtod/strtof results.
#ifdef _MSC_FULL_VER
#define ABSL_COMPILER_DOES_EXACT_ROUNDING 0
#define ABSL_STRTOD_HANDLES_NAN_CORRECTLY 0
#else
#define ABSL_COMPILER_DOES_EXACT_ROUNDING 1
#define ABSL_STRTOD_HANDLES_NAN_CORRECTLY 1
#endif
namespace {
using absl::strings_internal::Pow10;
#if ABSL_COMPILER_DOES_EXACT_ROUNDING
// Parses `str` with absl::from_chars into a double and verifies that the
// conversion succeeds, consumes the entire input, and yields exactly
// `expected_number`.
void TestDoubleParse(absl::string_view str, double expected_number) {
  SCOPED_TRACE(str);
  const char* const first = str.data();
  const char* const last = first + str.length();
  double parsed = 0.0;
  const absl::from_chars_result outcome = absl::from_chars(first, last, parsed);
  EXPECT_EQ(outcome.ec, std::errc());
  EXPECT_EQ(outcome.ptr, last);
  EXPECT_EQ(parsed, expected_number);
}
// Parses `str` with absl::from_chars into a float and verifies that the
// conversion succeeds, consumes the entire input, and yields exactly
// `expected_number`.
void TestFloatParse(absl::string_view str, float expected_number) {
  SCOPED_TRACE(str);
  const char* const first = str.data();
  const char* const last = first + str.length();
  float parsed = 0.0;
  const absl::from_chars_result outcome = absl::from_chars(first, last, parsed);
  EXPECT_EQ(outcome.ec, std::errc());
  EXPECT_EQ(outcome.ptr, last);
  EXPECT_EQ(parsed, expected_number);
}
// Convenience wrapper: stringizes the literal and checks that parsing it
// (and its negation) reproduces the compiler-rounded value exactly.
#define FROM_CHARS_TEST_DOUBLE(number)     \
  {                                        \
    TestDoubleParse(#number, number);      \
    TestDoubleParse("-" #number, -number); \
  }
// Same as FROM_CHARS_TEST_DOUBLE, but parses as float (note the ##f suffix
// pasted onto the literal so the expected value is single precision).
#define FROM_CHARS_TEST_FLOAT(number)    \
  {                                      \
    TestFloatParse(#number, number##f);  \
    TestFloatParse("-" #number, -number##f); \
  }
// Decimal inputs that (per the test name) lie very close to a rounding
// boundary of the target type; each macro checks both signs against the
// compiler's own rounding of the literal, so this test is only compiled
// when ABSL_COMPILER_DOES_EXACT_ROUNDING is 1.
TEST(FromChars, NearRoundingCases) {
  // Double-precision cases.
  FROM_CHARS_TEST_DOUBLE(5.e125);
  FROM_CHARS_TEST_DOUBLE(69.e267);
  FROM_CHARS_TEST_DOUBLE(999.e-026);
  FROM_CHARS_TEST_DOUBLE(7861.e-034);
  FROM_CHARS_TEST_DOUBLE(75569.e-254);
  FROM_CHARS_TEST_DOUBLE(928609.e-261);
  FROM_CHARS_TEST_DOUBLE(9210917.e080);
  FROM_CHARS_TEST_DOUBLE(84863171.e114);
  FROM_CHARS_TEST_DOUBLE(653777767.e273);
  FROM_CHARS_TEST_DOUBLE(5232604057.e-298);
  FROM_CHARS_TEST_DOUBLE(27235667517.e-109);
  FROM_CHARS_TEST_DOUBLE(653532977297.e-123);
  FROM_CHARS_TEST_DOUBLE(3142213164987.e-294);
  FROM_CHARS_TEST_DOUBLE(46202199371337.e-072);
  FROM_CHARS_TEST_DOUBLE(231010996856685.e-073);
  FROM_CHARS_TEST_DOUBLE(9324754620109615.e212);
  FROM_CHARS_TEST_DOUBLE(78459735791271921.e049);
  FROM_CHARS_TEST_DOUBLE(272104041512242479.e200);
  FROM_CHARS_TEST_DOUBLE(6802601037806061975.e198);
  FROM_CHARS_TEST_DOUBLE(20505426358836677347.e-221);
  FROM_CHARS_TEST_DOUBLE(836168422905420598437.e-234);
  FROM_CHARS_TEST_DOUBLE(4891559871276714924261.e222);
  // Single-precision cases.
  FROM_CHARS_TEST_FLOAT(5.e-20);
  FROM_CHARS_TEST_FLOAT(67.e14);
  FROM_CHARS_TEST_FLOAT(985.e15);
  FROM_CHARS_TEST_FLOAT(7693.e-42);
  FROM_CHARS_TEST_FLOAT(55895.e-16);
  FROM_CHARS_TEST_FLOAT(996622.e-44);
  FROM_CHARS_TEST_FLOAT(7038531.e-32);
  FROM_CHARS_TEST_FLOAT(60419369.e-46);
  FROM_CHARS_TEST_FLOAT(702990899.e-20);
  FROM_CHARS_TEST_FLOAT(6930161142.e-48);
  FROM_CHARS_TEST_FLOAT(25933168707.e-13);
  FROM_CHARS_TEST_FLOAT(596428896559.e20);
  // A second batch of double-precision cases.
  FROM_CHARS_TEST_DOUBLE(9.e-265);
  FROM_CHARS_TEST_DOUBLE(85.e-037);
  FROM_CHARS_TEST_DOUBLE(623.e100);
  FROM_CHARS_TEST_DOUBLE(3571.e263);
  FROM_CHARS_TEST_DOUBLE(81661.e153);
  FROM_CHARS_TEST_DOUBLE(920657.e-023);
  FROM_CHARS_TEST_DOUBLE(4603285.e-024);
  FROM_CHARS_TEST_DOUBLE(87575437.e-309);
  FROM_CHARS_TEST_DOUBLE(245540327.e122);
  FROM_CHARS_TEST_DOUBLE(6138508175.e120);
  FROM_CHARS_TEST_DOUBLE(83356057653.e193);
  FROM_CHARS_TEST_DOUBLE(619534293513.e124);
  FROM_CHARS_TEST_DOUBLE(2335141086879.e218);
  FROM_CHARS_TEST_DOUBLE(36167929443327.e-159);
  FROM_CHARS_TEST_DOUBLE(609610927149051.e-255);
  FROM_CHARS_TEST_DOUBLE(3743626360493413.e-165);
  FROM_CHARS_TEST_DOUBLE(94080055902682397.e-242);
  FROM_CHARS_TEST_DOUBLE(899810892172646163.e283);
  FROM_CHARS_TEST_DOUBLE(7120190517612959703.e120);
  FROM_CHARS_TEST_DOUBLE(25188282901709339043.e-252);
  FROM_CHARS_TEST_DOUBLE(308984926168550152811.e-052);
  FROM_CHARS_TEST_DOUBLE(6372891218502368041059.e064);
  // A second batch of single-precision cases.
  FROM_CHARS_TEST_FLOAT(3.e-23);
  FROM_CHARS_TEST_FLOAT(57.e18);
  FROM_CHARS_TEST_FLOAT(789.e-35);
  FROM_CHARS_TEST_FLOAT(2539.e-18);
  FROM_CHARS_TEST_FLOAT(76173.e28);
  FROM_CHARS_TEST_FLOAT(887745.e-11);
  FROM_CHARS_TEST_FLOAT(5382571.e-37);
  FROM_CHARS_TEST_FLOAT(82381273.e-35);
  FROM_CHARS_TEST_FLOAT(750486563.e-38);
  FROM_CHARS_TEST_FLOAT(3752432815.e-39);
  FROM_CHARS_TEST_FLOAT(75224575729.e-45);
  FROM_CHARS_TEST_FLOAT(459926601011.e15);
}
#undef FROM_CHARS_TEST_DOUBLE
#undef FROM_CHARS_TEST_FLOAT
#endif
// Parses `s` as a float via absl::from_chars, ignoring the conversion status.
// `f` is zero-initialized so that an invalid input yields a determinate 0.0f
// instead of returning an uninitialized float (which is undefined behavior:
// from_chars leaves the output unmodified on failure).
float ToFloat(absl::string_view s) {
  float f = 0.0f;
  absl::from_chars(s.data(), s.data() + s.size(), f);
  return f;
}
// Parses `s` as a double via absl::from_chars, ignoring the conversion
// status.  `d` is zero-initialized so that an invalid input yields a
// determinate 0.0 instead of returning an uninitialized double (undefined
// behavior: from_chars leaves the output unmodified on failure).
double ToDouble(absl::string_view s) {
  double d = 0.0;
  absl::from_chars(s.data(), s.data() + s.size(), d);
  return d;
}
// The same near-boundary inputs as NearRoundingCases, but with expected
// values spelled out explicitly as ldexp(mantissa, exponent) so the test
// does not depend on the compiler rounding literals exactly.
TEST(FromChars, NearRoundingCasesExplicit) {
  EXPECT_EQ(ToDouble("5.e125"), ldexp(6653062250012735, 365));
  EXPECT_EQ(ToDouble("69.e267"), ldexp(4705683757438170, 841));
  EXPECT_EQ(ToDouble("999.e-026"), ldexp(6798841691080350, -129));
  EXPECT_EQ(ToDouble("7861.e-034"), ldexp(8975675289889240, -153));
  EXPECT_EQ(ToDouble("75569.e-254"), ldexp(6091718967192243, -880));
  EXPECT_EQ(ToDouble("928609.e-261"), ldexp(7849264900213743, -900));
  EXPECT_EQ(ToDouble("9210917.e080"), ldexp(8341110837370930, 236));
  EXPECT_EQ(ToDouble("84863171.e114"), ldexp(4625202867375927, 353));
  EXPECT_EQ(ToDouble("653777767.e273"), ldexp(5068902999763073, 884));
  EXPECT_EQ(ToDouble("5232604057.e-298"), ldexp(5741343011915040, -1010));
  EXPECT_EQ(ToDouble("27235667517.e-109"), ldexp(6707124626673586, -380));
  EXPECT_EQ(ToDouble("653532977297.e-123"), ldexp(7078246407265384, -422));
  EXPECT_EQ(ToDouble("3142213164987.e-294"), ldexp(8219991337640559, -988));
  EXPECT_EQ(ToDouble("46202199371337.e-072"), ldexp(5224462102115359, -246));
  EXPECT_EQ(ToDouble("231010996856685.e-073"), ldexp(5224462102115359, -247));
  EXPECT_EQ(ToDouble("9324754620109615.e212"), ldexp(5539753864394442, 705));
  EXPECT_EQ(ToDouble("78459735791271921.e049"), ldexp(8388176519442766, 166));
  EXPECT_EQ(ToDouble("272104041512242479.e200"), ldexp(5554409530847367, 670));
  EXPECT_EQ(ToDouble("6802601037806061975.e198"), ldexp(5554409530847367, 668));
  EXPECT_EQ(ToDouble("20505426358836677347.e-221"),
            ldexp(4524032052079546, -722));
  EXPECT_EQ(ToDouble("836168422905420598437.e-234"),
            ldexp(5070963299887562, -760));
  EXPECT_EQ(ToDouble("4891559871276714924261.e222"),
            ldexp(6452687840519111, 757));
  // Single-precision cases use ldexpf.
  EXPECT_EQ(ToFloat("5.e-20"), ldexpf(15474250, -88));
  EXPECT_EQ(ToFloat("67.e14"), ldexpf(12479722, 29));
  EXPECT_EQ(ToFloat("985.e15"), ldexpf(14333636, 36));
  EXPECT_EQ(ToFloat("7693.e-42"), ldexpf(10979816, -150));
  EXPECT_EQ(ToFloat("55895.e-16"), ldexpf(12888509, -61));
  EXPECT_EQ(ToFloat("996622.e-44"), ldexpf(14224264, -150));
  EXPECT_EQ(ToFloat("7038531.e-32"), ldexpf(11420669, -107));
  EXPECT_EQ(ToFloat("60419369.e-46"), ldexpf(8623340, -150));
  EXPECT_EQ(ToFloat("702990899.e-20"), ldexpf(16209866, -61));
  EXPECT_EQ(ToFloat("6930161142.e-48"), ldexpf(9891056, -150));
  EXPECT_EQ(ToFloat("25933168707.e-13"), ldexpf(11138211, -32));
  EXPECT_EQ(ToFloat("596428896559.e20"), ldexpf(12333860, 82));
  // Second batch of double-precision cases.
  EXPECT_EQ(ToDouble("9.e-265"), ldexp(8168427841980010, -930));
  EXPECT_EQ(ToDouble("85.e-037"), ldexp(6360455125664090, -169));
  EXPECT_EQ(ToDouble("623.e100"), ldexp(6263531988747231, 289));
  EXPECT_EQ(ToDouble("3571.e263"), ldexp(6234526311072170, 833));
  EXPECT_EQ(ToDouble("81661.e153"), ldexp(6696636728760206, 472));
  EXPECT_EQ(ToDouble("920657.e-023"), ldexp(5975405561110124, -109));
  EXPECT_EQ(ToDouble("4603285.e-024"), ldexp(5975405561110124, -110));
  EXPECT_EQ(ToDouble("87575437.e-309"), ldexp(8452160731874668, -1053));
  EXPECT_EQ(ToDouble("245540327.e122"), ldexp(4985336549131723, 381));
  EXPECT_EQ(ToDouble("6138508175.e120"), ldexp(4985336549131723, 379));
  EXPECT_EQ(ToDouble("83356057653.e193"), ldexp(5986732817132056, 625));
  EXPECT_EQ(ToDouble("619534293513.e124"), ldexp(4798406992060657, 399));
  EXPECT_EQ(ToDouble("2335141086879.e218"), ldexp(5419088166961646, 713));
  EXPECT_EQ(ToDouble("36167929443327.e-159"), ldexp(8135819834632444, -536));
  EXPECT_EQ(ToDouble("609610927149051.e-255"), ldexp(4576664294594737, -850));
  EXPECT_EQ(ToDouble("3743626360493413.e-165"), ldexp(6898586531774201, -549));
  EXPECT_EQ(ToDouble("94080055902682397.e-242"), ldexp(6273271706052298, -800));
  EXPECT_EQ(ToDouble("899810892172646163.e283"), ldexp(7563892574477827, 947));
  EXPECT_EQ(ToDouble("7120190517612959703.e120"), ldexp(5385467232557565, 409));
  EXPECT_EQ(ToDouble("25188282901709339043.e-252"),
            ldexp(5635662608542340, -825));
  EXPECT_EQ(ToDouble("308984926168550152811.e-052"),
            ldexp(5644774693823803, -157));
  EXPECT_EQ(ToDouble("6372891218502368041059.e064"),
            ldexp(4616868614322430, 233));
  // Second batch of single-precision cases.
  EXPECT_EQ(ToFloat("3.e-23"), ldexpf(9507380, -98));
  EXPECT_EQ(ToFloat("57.e18"), ldexpf(12960300, 42));
  EXPECT_EQ(ToFloat("789.e-35"), ldexpf(10739312, -130));
  EXPECT_EQ(ToFloat("2539.e-18"), ldexpf(11990089, -72));
  EXPECT_EQ(ToFloat("76173.e28"), ldexpf(9845130, 86));
  EXPECT_EQ(ToFloat("887745.e-11"), ldexpf(9760860, -40));
  EXPECT_EQ(ToFloat("5382571.e-37"), ldexpf(11447463, -124));
  EXPECT_EQ(ToFloat("82381273.e-35"), ldexpf(8554961, -113));
  EXPECT_EQ(ToFloat("750486563.e-38"), ldexpf(9975678, -120));
  EXPECT_EQ(ToFloat("3752432815.e-39"), ldexpf(9975678, -121));
  EXPECT_EQ(ToFloat("75224575729.e-45"), ldexpf(13105970, -137));
  EXPECT_EQ(ToFloat("459926601011.e15"), ldexpf(12466336, 65));
}
// Exercises a decimal string `mantissa`e`exponent` that represents a value
// exactly halfway between two representable FloatType values.  Three inputs
// are derived from it:
//   - just below the halfway point (last digit decremented, then 1000 '9's),
//   - just above it (1000 '0's, then a trailing '1'),
//   - the exact halfway string itself,
// and each must parse to the corresponding expected value.
template <typename FloatType>
void TestHalfwayValue(const std::string& mantissa, int exponent,
                      FloatType expected_low, FloatType expected_high,
                      FloatType expected_half) {
  const auto parse = [](const std::string& text) {
    FloatType value = 0;
    absl::from_chars(text.data(), text.data() + text.size(), value);
    return value;
  };

  std::string below = mantissa;
  below[below.size() - 1] -= 1;
  absl::StrAppend(&below, std::string(1000, '9'), "e", exponent);
  EXPECT_EQ(expected_low, parse(below));

  const std::string above =
      absl::StrCat(mantissa, std::string(1000, '0'), "1e", exponent);
  EXPECT_EQ(expected_high, parse(above));

  EXPECT_EQ(expected_half, parse(absl::StrCat(mantissa, "e", exponent)));
}
// Checks round-to-nearest-even behavior at the extreme edges of the double
// range: the zero/subnormal boundary, subnormal/normal boundary, and the
// top of the normal range.  Each mantissa below is the exact decimal
// expansion of a halfway point between two adjacent representable doubles.
TEST(FromChars, DoubleRounding) {
  const double zero = 0.0;
  const double first_subnormal = nextafter(zero, 1.0);
  const double second_subnormal = nextafter(first_subnormal, 1.0);
  const double first_normal = DBL_MIN;
  const double last_subnormal = nextafter(first_normal, 0.0);
  const double second_normal = nextafter(first_normal, 1.0);
  const double last_normal = DBL_MAX;
  const double penultimate_normal = nextafter(last_normal, 0.0);
  // Halfway between zero and the smallest subnormal (ties round to even: 0).
  TestHalfwayValue(
      "2."
      "470328229206232720882843964341106861825299013071623822127928412503377536"
      "351043759326499181808179961898982823477228588654633283551779698981993873"
      "980053909390631503565951557022639229085839244910518443593180284993653615"
      "250031937045767824921936562366986365848075700158576926990370631192827955"
      "855133292783433840935197801553124659726357957462276646527282722005637400"
      "648549997709659947045402082816622623785739345073633900796776193057750674"
      "017632467360096895134053553745851666113422376667860416215968046191446729"
      "184030053005753084904876539171138659164623952491262365388187963623937328"
      "042389101867234849766823508986338858792562830275599565752445550725518931"
      "369083625477918694866799496832404970582102851318545139621383772282614543"
      "7693412532098591327667236328125",
      -324, zero, first_subnormal, zero);
  // Halfway between the first and second subnormals (ties round to even).
  TestHalfwayValue(
      "7."
      "410984687618698162648531893023320585475897039214871466383785237510132609"
      "053131277979497545424539885696948470431685765963899850655339096945981621"
      "940161728171894510697854671067917687257517734731555330779540854980960845"
      "750095811137303474765809687100959097544227100475730780971111893578483867"
      "565399878350301522805593404659373979179073872386829939581848166016912201"
      "945649993128979841136206248449867871357218035220901702390328579173252022"
      "052897402080290685402160661237554998340267130003581248647904138574340187"
      "552090159017259254714629617513415977493871857473787096164563890871811984"
      "127167305601704549300470526959016576377688490826798697257336652176556794"
      "107250876433756084600398490497214911746308553955635418864151316847843631"
      "3080237596295773983001708984375",
      -324, first_subnormal, second_subnormal, second_subnormal);
  // Halfway between the largest subnormal and the smallest normal.
  TestHalfwayValue(
      "2."
      "225073858507201136057409796709131975934819546351645648023426109724822222"
      "021076945516529523908135087914149158913039621106870086438694594645527657"
      "207407820621743379988141063267329253552286881372149012981122451451889849"
      "057222307285255133155755015914397476397983411801999323962548289017107081"
      "850690630666655994938275772572015763062690663332647565300009245888316433"
      "037779791869612049497390377829704905051080609940730262937128958950003583"
      "799967207254304360284078895771796150945516748243471030702609144621572289"
      "880258182545180325707018860872113128079512233426288368622321503775666622"
      "503982534335974568884423900265498198385487948292206894721689831099698365"
      "846814022854243330660339850886445804001034933970427567186443383770486037"
      "86162277173854562306587467901408672332763671875",
      -308, last_subnormal, first_normal, first_normal);
  // Halfway between the first and second normals.
  TestHalfwayValue(
      "2."
      "225073858507201630123055637955676152503612414573018013083228724049586647"
      "606759446192036794116886953213985520549032000903434781884412325572184367"
      "563347617020518175998922941393629966742598285899994830148971433555578567"
      "693279306015978183162142425067962460785295885199272493577688320732492479"
      "924816869232247165964934329258783950102250973957579510571600738343645738"
      "494324192997092179207389919761694314131497173265255020084997973676783743"
      "155205818804439163810572367791175177756227497413804253387084478193655533"
      "073867420834526162513029462022730109054820067654020201547112002028139700"
      "141575259123440177362244273712468151750189745559978653234255886219611516"
      "335924167958029604477064946470184777360934300451421683607013647479513962"
      "13837722826145437693412532098591327667236328125",
      -308, first_normal, second_normal, first_normal);
  // Halfway between the two largest normals.
  TestHalfwayValue(
      "1."
      "797693134862315608353258760581052985162070023416521662616611746258695532"
      "672923265745300992879465492467506314903358770175220871059269879629062776"
      "047355692132901909191523941804762171253349609463563872612866401980290377"
      "995141836029815117562837277714038305214839639239356331336428021390916694"
      "57927874464075218944",
      308, penultimate_normal, last_normal, penultimate_normal);
}
// Float analogue of DoubleRounding: halfway-point decimals at the
// zero/subnormal, subnormal/normal, and top-of-range boundaries of float.
TEST(FromChars, FloatRounding) {
  const float zero = 0.0;
  const float first_subnormal = nextafterf(zero, 1.0);
  const float second_subnormal = nextafterf(first_subnormal, 1.0);
  const float first_normal = FLT_MIN;
  const float last_subnormal = nextafterf(first_normal, 0.0);
  const float second_normal = nextafterf(first_normal, 1.0);
  const float last_normal = FLT_MAX;
  const float penultimate_normal = nextafterf(last_normal, 0.0);
  // Halfway between zero and the smallest subnormal (ties to even: 0).
  TestHalfwayValue(
      "7."
      "006492321624085354618647916449580656401309709382578858785341419448955413"
      "42930300743319094181060791015625",
      -46, zero, first_subnormal, zero);
  // Halfway between the first and second subnormals.
  TestHalfwayValue(
      "2."
      "101947696487225606385594374934874196920392912814773657635602425834686624"
      "028790902229957282543182373046875",
      -45, first_subnormal, second_subnormal, second_subnormal);
  // Halfway between the largest subnormal and the smallest normal.
  TestHalfwayValue(
      "1."
      "175494280757364291727882991035766513322858992758990427682963118425003064"
      "9651730385585324256680905818939208984375",
      -38, last_subnormal, first_normal, first_normal);
  // Halfway between the first and second normals.
  TestHalfwayValue(
      "1."
      "175494420887210724209590083408724842314472120785184615334540294131831453"
      "9442813071445925743319094181060791015625",
      -38, first_normal, second_normal, first_normal);
  // Halfway between the two largest normals.
  TestHalfwayValue("3.40282336497324057985868971510891282432", 38,
                   penultimate_normal, last_normal, penultimate_normal);
}
TEST(FromChars, Underflow) {
  // An underflowing magnitude must report result_out_of_range, consume the
  // whole input, keep the input's sign, and produce a magnitude no larger
  // than the smallest normal value.
  absl::from_chars_result status;
  double parsed_double;
  float parsed_float;

  const std::string negative_underflow = "-1e-1000";
  const char* first = negative_underflow.data();
  const char* last = first + negative_underflow.size();
  parsed_double = 100.0;
  status = absl::from_chars(first, last, parsed_double);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_TRUE(std::signbit(parsed_double));
  EXPECT_GE(parsed_double, -std::numeric_limits<double>::min());
  parsed_float = 100.0;
  status = absl::from_chars(first, last, parsed_float);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_TRUE(std::signbit(parsed_float));
  EXPECT_GE(parsed_float, -std::numeric_limits<float>::min());

  const std::string positive_underflow = "1e-1000";
  first = positive_underflow.data();
  last = first + positive_underflow.size();
  parsed_double = -100.0;
  status = absl::from_chars(first, last, parsed_double);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_FALSE(std::signbit(parsed_double));
  EXPECT_LE(parsed_double, std::numeric_limits<double>::min());
  parsed_float = -100.0;
  status = absl::from_chars(first, last, parsed_float);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_FALSE(std::signbit(parsed_float));
  EXPECT_LE(parsed_float, std::numeric_limits<float>::min());
}
TEST(FromChars, Overflow) {
  // An overflowing magnitude must report result_out_of_range, consume the
  // whole input, and produce the largest finite value with the input's sign.
  absl::from_chars_result status;
  double parsed_double;
  float parsed_float;

  const std::string negative_overflow = "-1e1000";
  const char* first = negative_overflow.data();
  const char* last = first + negative_overflow.size();
  parsed_double = 100.0;
  status = absl::from_chars(first, last, parsed_double);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_TRUE(std::signbit(parsed_double));
  EXPECT_EQ(parsed_double, -std::numeric_limits<double>::max());
  parsed_float = 100.0;
  status = absl::from_chars(first, last, parsed_float);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_TRUE(std::signbit(parsed_float));
  EXPECT_EQ(parsed_float, -std::numeric_limits<float>::max());

  const std::string positive_overflow = "1e1000";
  first = positive_overflow.data();
  last = first + positive_overflow.size();
  parsed_double = -100.0;
  status = absl::from_chars(first, last, parsed_double);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_FALSE(std::signbit(parsed_double));
  EXPECT_EQ(parsed_double, std::numeric_limits<double>::max());
  parsed_float = -100.0;
  status = absl::from_chars(first, last, parsed_float);
  EXPECT_EQ(status.ptr, last);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_FALSE(std::signbit(parsed_float));
  EXPECT_EQ(parsed_float, std::numeric_limits<float>::max());
}
TEST(FromChars, RegressionTestsFromFuzzer) {
  // Fuzzer-found input: a hex float with an enormous decimal exponent must
  // report overflow rather than misbehave.
  absl::string_view input = "0x21900000p00000000099";
  float parsed;
  const absl::from_chars_result status =
      absl::from_chars(input.data(), input.data() + input.size(), parsed);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
}
TEST(FromChars, ReturnValuePtr) {
  // result.ptr must point one past the last character that took part in the
  // conversion, for success, out-of-range, and invalid-argument outcomes.
  double parsed;
  absl::from_chars_result status;

  std::string normal = "3.14@#$%@#$%";
  status =
      absl::from_chars(normal.data(), normal.data() + normal.size(), parsed);
  EXPECT_EQ(status.ec, std::errc());
  EXPECT_EQ(status.ptr - normal.data(), 4);

  std::string overflow = "1e1000@#$%@#$%";
  status = absl::from_chars(overflow.data(), overflow.data() + overflow.size(),
                            parsed);
  EXPECT_EQ(status.ec, std::errc::result_out_of_range);
  EXPECT_EQ(status.ptr - overflow.data(), 6);

  std::string garbage = "#$%@#$%";
  status = absl::from_chars(garbage.data(), garbage.data() + garbage.size(),
                            parsed);
  EXPECT_EQ(status.ec, std::errc::invalid_argument);
  EXPECT_EQ(status.ptr - garbage.data(), 0);
}
TEST(FromChars, TestVersusStrtod) {
  // Cross-check absl::from_chars against the C library's strtod over a
  // sparse grid of seven-digit mantissas and a wide exponent range.
  for (int mantissa = 1000000; mantissa <= 9999999; mantissa += 501) {
    for (int exponent = -300; exponent < 300; ++exponent) {
      const std::string input = absl::StrCat(mantissa, "e", exponent);
      const double expected = strtod(input.c_str(), nullptr);
      double actual = 0;
      absl::from_chars(input.data(), input.data() + input.size(), actual);
      ASSERT_EQ(expected, actual) << input;
    }
  }
}
TEST(FromChars, TestVersusStrtof) {
  // Cross-check absl::from_chars against the C library's strtof over a
  // sparse grid of seven-digit mantissas and the float exponent range.
  for (int mantissa = 1000000; mantissa <= 9999999; mantissa += 501) {
    for (int exponent = -43; exponent < 32; ++exponent) {
      const std::string input = absl::StrCat(mantissa, "e", exponent);
      const float expected = strtof(input.c_str(), nullptr);
      float actual = 0;
      absl::from_chars(input.data(), input.data() + input.size(), actual);
      ASSERT_EQ(expected, actual) << input;
    }
  }
}
// Bitwise equality: true iff `a` and `b` have identical object
// representations.  Unlike operator==, this distinguishes +0.0 from -0.0
// and compares NaN payloads.
template <typename Float>
bool Identical(Float a, Float b) {
  return memcmp(&a, &b, sizeof(Float)) == 0;
}
// For each n-char-sequence, "nan(<seq>)" must parse to a double that is
// bit-identical to std::nan("<seq>") (same NaN payload), and a leading '-'
// must flip exactly the sign bit.
TEST(FromChars, NaNDoubles) {
  for (std::string n_char_sequence :
       {"", "1", "2", "3", "fff", "FFF", "200000", "400000", "4000000000000",
        "8000000000000", "abc123", "legal_but_unexpected",
        "99999999999999999999999", "_"}) {
    std::string input = absl::StrCat("nan(", n_char_sequence, ")");
    SCOPED_TRACE(input);
    double from_chars_double;
    absl::from_chars(input.data(), input.data() + input.size(),
                     from_chars_double);
    // Payload must match what the C library produces for the same sequence.
    double std_nan_double = std::nan(n_char_sequence.c_str());
    EXPECT_TRUE(Identical(from_chars_double, std_nan_double));
#if ABSL_STRTOD_HANDLES_NAN_CORRECTLY
    // Only checked on platforms whose strtod preserves NaN payloads.
    double strtod_double = strtod(input.c_str(), nullptr);
    EXPECT_TRUE(Identical(from_chars_double, strtod_double));
#endif
    std::string negative_input = "-" + input;
    double negative_from_chars_double;
    absl::from_chars(negative_input.data(),
                     negative_input.data() + negative_input.size(),
                     negative_from_chars_double);
    EXPECT_TRUE(std::signbit(negative_from_chars_double));
    // The negative NaN must differ from the positive one only in sign.
    EXPECT_FALSE(Identical(negative_from_chars_double, from_chars_double));
    from_chars_double = std::copysign(from_chars_double, -1.0);
    EXPECT_TRUE(Identical(negative_from_chars_double, from_chars_double));
  }
}
// Float analogue of NaNDoubles: "nan(<seq>)" must parse bit-identically to
// std::nanf("<seq>"), and a leading '-' must flip exactly the sign bit.
TEST(FromChars, NaNFloats) {
  for (std::string n_char_sequence :
       {"", "1", "2", "3", "fff", "FFF", "200000", "400000", "4000000000000",
        "8000000000000", "abc123", "legal_but_unexpected",
        "99999999999999999999999", "_"}) {
    std::string input = absl::StrCat("nan(", n_char_sequence, ")");
    SCOPED_TRACE(input);
    float from_chars_float;
    absl::from_chars(input.data(), input.data() + input.size(),
                     from_chars_float);
    // Payload must match what the C library produces for the same sequence.
    float std_nan_float = std::nanf(n_char_sequence.c_str());
    EXPECT_TRUE(Identical(from_chars_float, std_nan_float));
#if ABSL_STRTOD_HANDLES_NAN_CORRECTLY
    // Only checked on platforms whose strtof preserves NaN payloads.
    float strtof_float = strtof(input.c_str(), nullptr);
    EXPECT_TRUE(Identical(from_chars_float, strtof_float));
#endif
    std::string negative_input = "-" + input;
    float negative_from_chars_float;
    absl::from_chars(negative_input.data(),
                     negative_input.data() + negative_input.size(),
                     negative_from_chars_float);
    EXPECT_TRUE(std::signbit(negative_from_chars_float));
    // The negative NaN must differ from the positive one only in sign.
    EXPECT_FALSE(Identical(negative_from_chars_float, from_chars_float));
    from_chars_float = std::copysign(from_chars_float, -1.0f);
    EXPECT_TRUE(Identical(negative_from_chars_float, from_chars_float));
  }
}
// Grows a scan step by roughly 25% plus one, so the sweeps in
// TestOverflowAndUnderflow cover wide exponent ranges in logarithmically
// many probes instead of one probe per value.
int NextStep(int step) {
  const int growth = step >> 2;  // arithmetic shift, matching the original
  return step + growth + 1;
}
// Sweeps an integer parameter (typically an exponent) across and beyond the
// representable range [lower_bound, upper_bound].  Inside the range the
// parse must succeed and match `expected_generator`; below it the parse must
// underflow and above it overflow, both reported as result_out_of_range.
// The four loops use an accelerating step (NextStep) to keep the sweep fast.
template <typename Float>
void TestOverflowAndUnderflow(
    const std::function<std::string(int)>& input_generator,
    const std::function<Float(int)>& expected_generator, int lower_bound,
    int upper_bound) {
  int index, step;
  // Upward sweep through the valid range: exact values expected.
  for (index = lower_bound, step = 1; index < upper_bound;
       index += step, step = NextStep(step)) {
    std::string input = input_generator(index);
    SCOPED_TRACE(input);
    Float expected = expected_generator(index);
    Float actual;
    auto result =
        absl::from_chars(input.data(), input.data() + input.size(), actual);
    EXPECT_EQ(result.ec, std::errc());
    EXPECT_EQ(expected, actual)
        << absl::StrFormat("%a vs %a", expected, actual);
  }
  // Downward sweep through the valid range (hits different sample points).
  for (index = upper_bound, step = 1; index > lower_bound;
       index -= step, step = NextStep(step)) {
    std::string input = input_generator(index);
    SCOPED_TRACE(input);
    Float expected = expected_generator(index);
    Float actual;
    auto result =
        absl::from_chars(input.data(), input.data() + input.size(), actual);
    EXPECT_EQ(result.ec, std::errc());
    EXPECT_EQ(expected, actual)
        << absl::StrFormat("%a vs %a", expected, actual);
  }
  // Below the range: must underflow (value ends up < 1.0).
  for (index = lower_bound - 1, step = 1; index > -1000000;
       index -= step, step = NextStep(step)) {
    std::string input = input_generator(index);
    SCOPED_TRACE(input);
    Float actual;
    auto result =
        absl::from_chars(input.data(), input.data() + input.size(), actual);
    EXPECT_EQ(result.ec, std::errc::result_out_of_range);
    EXPECT_LT(actual, 1.0);
  }
  // Above the range: must overflow (value ends up > 1.0).
  for (index = upper_bound + 1, step = 1; index < 1000000;
       index += step, step = NextStep(step)) {
    std::string input = input_generator(index);
    SCOPED_TRACE(input);
    Float actual;
    auto result =
        absl::from_chars(input.data(), input.data() + input.size(), actual);
    EXPECT_EQ(result.ec, std::errc::result_out_of_range);
    EXPECT_GT(actual, 1.0);
  }
}
TEST(FromChars, HexdecimalDoubleLimits) {
auto input_gen = [](int index) { return absl::StrCat("0x1.0p", index); };
auto expected_gen = [](int index) { return std::ldexp(1.0, index); };
TestOverflowAndUnderflow<doub | bool Power10Overflow(int n) { return n >= kPower10TableMaxExclusive; } | TEST(FromChars, NearRoundingCasesExplicit) {
EXPECT_EQ(ToDouble("5.e125"), ldexp(6653062250012735, 365));
EXPECT_EQ(ToDouble("69.e267"), ldexp(4705683757438170, 841));
EXPECT_EQ(ToDouble("999.e-026"), ldexp(6798841691080350, -129));
EXPECT_EQ(ToDouble("7861.e-034"), ldexp(8975675289889240, -153));
EXPECT_EQ(ToDouble("75569.e-254"), ldexp(6091718967192243, -880));
EXPECT_EQ(ToDouble("928609.e-261"), ldexp(7849264900213743, -900));
EXPECT_EQ(ToDouble("9210917.e080"), ldexp(8341110837370930, 236));
EXPECT_EQ(ToDouble("84863171.e114"), ldexp(4625202867375927, 353));
EXPECT_EQ(ToDouble("653777767.e273"), ldexp(5068902999763073, 884));
EXPECT_EQ(ToDouble("5232604057.e-298"), ldexp(5741343011915040, -1010));
EXPECT_EQ(ToDouble("27235667517.e-109"), ldexp(6707124626673586, -380));
EXPECT_EQ(ToDouble("653532977297.e-123"), ldexp(7078246407265384, -422));
EXPECT_EQ(ToDouble("3142213164987.e-294"), ldexp(8219991337640559, -988));
EXPECT_EQ(ToDouble("46202199371337.e-072"), ldexp(5224462102115359, -246));
EXPECT_EQ(ToDouble("231010996856685.e-073"), ldexp(5224462102115359, -247));
EXPECT_EQ(ToDouble("9324754620109615.e212"), ldexp(5539753864394442, 705));
EXPECT_EQ(ToDouble("78459735791271921.e049"), ldexp(8388176519442766, 166));
EXPECT_EQ(ToDouble("272104041512242479.e200"), ldexp(5554409530847367, 670));
EXPECT_EQ(ToDouble("6802601037806061975.e198"), ldexp(5554409530847367, 668));
EXPECT_EQ(ToDouble("20505426358836677347.e-221"),
ldexp(4524032052079546, -722));
EXPECT_EQ(ToDouble("836168422905420598437.e-234"),
ldexp(5070963299887562, -760));
EXPECT_EQ(ToDouble("4891559871276714924261.e222"),
ldexp(6452687840519111, 757));
EXPECT_EQ(ToFloat("5.e-20"), ldexpf(15474250, -88));
EXPECT_EQ(ToFloat("67.e14"), ldexpf(12479722, 29));
EXPECT_EQ(ToFloat("985.e15"), ldexpf(14333636, 36));
EXPECT_EQ(ToFloat("7693.e-42"), ldexpf(10979816, -150));
EXPECT_EQ(ToFloat("55895.e-16"), ldexpf(12888509, -61));
EXPECT_EQ(ToFloat("996622.e-44"), ldexpf(14224264, -150));
EXPECT_EQ(ToFloat("7038531.e-32"), ldexpf(11420669, -107));
EXPECT_EQ(ToFloat("60419369.e-46"), ldexpf(8623340, -150));
EXPECT_EQ(ToFloat("702990899.e-20"), ldexpf(16209866, -61));
EXPECT_EQ(ToFloat("6930161142.e-48"), ldexpf(9891056, -150));
EXPECT_EQ(ToFloat("25933168707.e-13"), ldexpf(11138211, -32));
EXPECT_EQ(ToFloat("596428896559.e20"), ldexpf(12333860, 82));
EXPECT_EQ(ToDouble("9.e-265"), ldexp(8168427841980010, -930));
EXPECT_EQ(ToDouble("85.e-037"), ldexp(6360455125664090, -169));
EXPECT_EQ(ToDouble("623.e100"), ldexp(6263531988747231, 289));
EXPECT_EQ(ToDouble("3571.e263"), ldexp(6234526311072170, 833));
EXPECT_EQ(ToDouble("81661.e153"), ldexp(6696636728760206, 472));
EXPECT_EQ(ToDouble("920657.e-023"), ldexp(5975405561110124, -109));
EXPECT_EQ(ToDouble("4603285.e-024"), ldexp(5975405561110124, -110));
EXPECT_EQ(ToDouble("87575437.e-309"), ldexp(8452160731874668, -1053));
EXPECT_EQ(ToDouble("245540327.e122"), ldexp(4985336549131723, 381));
EXPECT_EQ(ToDouble("6138508175.e120"), ldexp(4985336549131723, 379));
EXPECT_EQ(ToDouble("83356057653.e193"), ldexp(5986732817132056, 625));
EXPECT_EQ(ToDouble("619534293513.e124"), ldexp(4798406992060657, 399));
EXPECT_EQ(ToDouble("2335141086879.e218"), ldexp(5419088166961646, 713));
EXPECT_EQ(ToDouble("36167929443327.e-159"), ldexp(8135819834632444, -536));
EXPECT_EQ(ToDouble("609610927149051.e-255"), ldexp(4576664294594737, -850));
EXPECT_EQ(ToDouble("3743626360493413.e-165"), ldexp(6898586531774201, -549));
EXPECT_EQ(ToDouble("94080055902682397.e-242"), ldexp(6273271706052298, -800));
EXPECT_EQ(ToDouble("899810892172646163.e283"), ldexp(7563892574477827, 947));
EXPECT_EQ(ToDouble("7120190517612959703.e120"), ldexp(5385467232557565, 409));
EXPECT_EQ(ToDouble("25188282901709339043.e-252"),
ldexp(5635662608542340, -825));
EXPECT_EQ(ToDouble("308984926168550152811.e-052"),
ldexp(5644774693823803, -157));
EXPECT_EQ(ToDouble("6372891218502368041059.e064"),
ldexp(4616868614322430, 233));
EXPECT_EQ(ToFloat("3.e-23"), ldexpf(9507380, -98));
EXPECT_EQ(ToFloat("57.e18"), ldexpf(12960300, 42));
EXPECT_EQ(ToFloat("789.e-35"), ldexpf(10739312, -130));
EXPECT_EQ(ToFloat("2539.e-18"), ldexpf(11990089, -72));
EXPECT_EQ(ToFloat("76173.e28"), ldexpf(9845130, 86));
EXPECT_EQ(ToFloat("887745.e-11"), ldexpf(9760860, -40));
EXPECT_EQ(ToFloat("5382571.e-37"), ldexpf(11447463, -124));
EXPECT_EQ(ToFloat("82381273.e-35"), ldexpf(8554961, -113));
EXPECT_EQ(ToFloat("750486563.e-38"), ldexpf(9975678, -120));
EXPECT_EQ(ToFloat("3752432815.e-39"), ldexpf(9975678, -121));
EXPECT_EQ(ToFloat("75224575729.e-45"), ldexpf(13105970, -137));
EXPECT_EQ(ToFloat("459926601011.e15"), ldexpf(12466336, 65));
} |
#include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
// Command-line override for the EC2 instance metadata service (IMDS)
// endpoint; takes precedence over the AWS_EC2_METADATA_SERVICE_ENDPOINT
// environment variable (see GetEC2MetadataServiceEndpoint below).
// Fix: the help text was ungrammatical ("Endpoint to used for http access
// AWS metadata service.").
ABSL_FLAG(std::optional<std::string>,
          tensorstore_aws_ec2_metadata_service_endpoint, std::nullopt,
          "Endpoint used for HTTP access to the AWS metadata service. "
          "Overrides AWS_EC2_METADATA_SERVICE_ENDPOINT.");
using ::tensorstore::Result;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_http::HttpRequestBuilder;
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
// Request header prefix (colon included; the token value is appended
// directly) carrying the IMDSv2 session token on metadata requests.
static constexpr char kMetadataTokenHeader[] = "x-aws-ec2-metadata-token:";
// Metadata path listing IAM role names; appending a role name yields that
// role's security-credentials JSON document.
static constexpr char kIamCredentialsPath[] =
    "/latest/meta-data/iam/security-credentials/";
// Short connect timeout: on non-EC2 hosts the metadata address is typically
// unroutable, so fail fast rather than hang.
static constexpr absl::Duration kConnectTimeout = absl::Milliseconds(200);
// Not referenced in this excerpt — presumably a default credential refresh
// window used elsewhere in this file; verify against the full source.
static constexpr absl::Duration kDefaultTimeout = absl::Minutes(5);
// Expected "Code" field of a successful credentials response; not
// referenced in this excerpt — verify usage in the full source.
static constexpr char kSuccess[] = "Success";
// Returns the base URL of the EC2 instance metadata service, preferring the
// --tensorstore_aws_ec2_metadata_service_endpoint flag, then the
// AWS_EC2_METADATA_SERVICE_ENDPOINT environment variable, then the standard
// link-local IMDS address.
// Fix: the fallback string literal was truncated to `"http:` (unterminated
// — everything after `//` was stripped), which does not compile; restore
// the well-known IMDS endpoint.
std::string GetEC2MetadataServiceEndpoint() {
  return GetFlagOrEnvValue(FLAGS_tensorstore_aws_ec2_metadata_service_endpoint,
                           "AWS_EC2_METADATA_SERVICE_ENDPOINT")
      .value_or("http://169.254.169.254");
}
// Deserialized form of the IMDS security-credentials JSON document.
// `code` is the only mandatory field; the rest are bound as optional
// members by EC2CredentialsResponseBinder below.
struct EC2CredentialsResponse {
  std::string code;  // "Code" field, e.g. "Success" on a good response.
  std::optional<absl::Time> last_updated;          // "LastUpdated"
  std::optional<std::string> type;                 // "Type"
  std::optional<std::string> access_key_id;        // "AccessKeyId"
  std::optional<std::string> secret_access_key;    // "SecretAccessKey"
  std::optional<std::string> token;                // "Token" (session token)
  std::optional<absl::Time> expiration;            // "Expiration"
};
// JSON binder mapping the IMDS security-credentials document onto
// EC2CredentialsResponse.  "Code" is required; every other member is
// optional so partial/failed responses still parse.
inline constexpr auto EC2CredentialsResponseBinder = jb::Object(
    jb::Member("Code", jb::Projection(&EC2CredentialsResponse::code)),
    jb::OptionalMember("LastUpdated",
                       jb::Projection(&EC2CredentialsResponse::last_updated)),
    jb::OptionalMember("Type", jb::Projection(&EC2CredentialsResponse::type)),
    jb::OptionalMember("AccessKeyId",
                       jb::Projection(&EC2CredentialsResponse::access_key_id)),
    jb::OptionalMember(
        "SecretAccessKey",
        jb::Projection(&EC2CredentialsResponse::secret_access_key)),
    jb::OptionalMember("Token", jb::Projection(&EC2CredentialsResponse::token)),
    jb::OptionalMember("Expiration",
                       jb::Projection(&EC2CredentialsResponse::expiration)));
// Obtains an IMDSv2 session token by POSTing to <endpoint>/latest/api/token
// with a 21600-second (6-hour) TTL header.  Returns the raw token payload,
// or an error status if the transport fails or the metadata service
// responds with an error.
Result<absl::Cord> GetEC2ApiToken(std::string_view endpoint,
                                  internal_http::HttpTransport& transport) {
  auto request =
      HttpRequestBuilder("POST",
                         tensorstore::StrCat(endpoint, "/latest/api/token"))
          .AddHeader("x-aws-ec2-metadata-token-ttl-seconds: 21600")
          .BuildRequest();
  auto options = internal_http::IssueRequestOptions()
                     .SetRequestTimeout(absl::InfiniteDuration())
                     .SetConnectTimeout(kConnectTimeout);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto response,
      transport.IssueRequest(request, std::move(options)).result());
  bool is_retryable = false;
  TENSORSTORE_RETURN_IF_ERROR(AwsHttpResponseToStatus(response, is_retryable));
  return std::move(response.payload);
}
}
// Retrieves AWS credentials from the EC2 instance metadata service (IMDSv2):
//   1. Resolve the endpoint (lazily, first call only).
//   2. Obtain an API session token.
//   3. List the IAM roles attached to the instance; the first role is used.
//   4. Fetch and parse that role's security credentials JSON.
// Returns NotFound if no roles are attached or the response code is not
// "Success". The expiry is backed off by 60 seconds to force refresh before
// the credentials actually lapse; responses without an Expiration field get a
// default 5-minute lifetime.
Result<AwsCredentials> EC2MetadataCredentialProvider::GetCredentials() {
  // Endpoint is resolved once and cached for subsequent calls.
  if (endpoint_.empty()) {
    endpoint_ = GetEC2MetadataServiceEndpoint();
  }
  TENSORSTORE_ASSIGN_OR_RETURN(auto api_token,
                               GetEC2ApiToken(endpoint_, *transport_));
  // The same session-token header is attached to every metadata request.
  auto token_header = tensorstore::StrCat(kMetadataTokenHeader, api_token);
  auto iam_role_request =
      HttpRequestBuilder("GET",
                         tensorstore::StrCat(endpoint_, kIamCredentialsPath))
          .AddHeader(token_header)
          .BuildRequest();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_role_response,
      transport_->IssueRequest(iam_role_request, {}).result());
  auto iam_role_plain_text = iam_role_response.payload.Flatten();
  bool is_retryable = false;
  TENSORSTORE_RETURN_IF_ERROR(
      AwsHttpResponseToStatus(iam_role_response, is_retryable));
  // The role listing is newline-delimited; blank lines are discarded.
  std::vector<std::string_view> iam_roles =
      absl::StrSplit(iam_role_plain_text, '\n', absl::SkipWhitespace());
  if (iam_roles.empty()) {
    return absl::NotFoundError("Empty EC2 Role list");
  }
  // Only the first attached role is consulted.
  auto iam_credentials_request_url =
      tensorstore::StrCat(endpoint_, kIamCredentialsPath, iam_roles[0]);
  auto iam_credentials_request =
      HttpRequestBuilder("GET", iam_credentials_request_url)
          .AddHeader(token_header)
          .BuildRequest();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_credentials_response,
      transport_->IssueRequest(iam_credentials_request, {}).result());
  auto iam_credentials_plain_text = iam_credentials_response.payload.Flatten();
  TENSORSTORE_RETURN_IF_ERROR(
      AwsHttpResponseToStatus(iam_credentials_response, is_retryable));
  auto json_credentials = ParseJson(iam_credentials_plain_text);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto iam_credentials,
      jb::FromJson<EC2CredentialsResponse>(json_credentials,
                                           EC2CredentialsResponseBinder));
  if (iam_credentials.code != kSuccess) {
    return absl::NotFoundError(
        absl::StrCat("EC2Metadata request to [", iam_credentials_request_url,
                     "] failed with code ", iam_credentials.code));
  }
  // Expire one minute early so callers refresh before the server-side expiry.
  auto default_timeout = absl::Now() + kDefaultTimeout;
  auto expires_at =
      iam_credentials.expiration.value_or(default_timeout) - absl::Seconds(60);
  return AwsCredentials{iam_credentials.access_key_id.value_or(""),
                        iam_credentials.secret_access_key.value_or(""),
                        iam_credentials.token.value_or(""), expires_at};
}
}
} | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using ::tensorstore::internal_kvstore_s3::EC2MetadataCredentialProvider;
static constexpr char kDefaultEndpoint[] = "http:
static constexpr char kCustomEndpoint[] = "http:
static constexpr char kApiToken[] = "1234567890";
static constexpr char kAccessKey[] = "ASIA1234567890";
static constexpr char kSecretKey[] = "1234567890abcdef";
static constexpr char kSessionToken[] = "abcdef123456790";
// Fixture that clears AWS_EC2_METADATA_SERVICE_ENDPOINT before each test so
// the default endpoint is used unless a test sets the variable explicitly.
class EC2MetadataCredentialProviderTest : public ::testing::Test {
 protected:
  void SetUp() override { UnsetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); }
};
// Happy path: the full IMDSv2 flow against a mock transport yields the mocked
// credentials, uses the default endpoint, and backs the expiry off by 60 s.
TEST_F(EC2MetadataCredentialProviderTest, CredentialRetrievalFlow) {
  auto expiry = absl::Now() + absl::Seconds(200);
  auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kDefaultEndpoint, kApiToken, kAccessKey,
                             kSecretKey, kSessionToken, expiry));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  // Provider subtracts 60 seconds from the reported expiry.
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// The AWS_EC2_METADATA_SERVICE_ENDPOINT environment variable overrides the
// default metadata endpoint when no endpoint is injected.
TEST_F(EC2MetadataCredentialProviderTest, EnvironmentVariableMetadataServer) {
  SetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", kCustomEndpoint);
  auto expiry = absl::Now() + absl::Seconds(200);
  auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
                             kSessionToken, expiry));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// An endpoint passed directly to the provider constructor takes effect
// without consulting flags or environment variables.
TEST_F(EC2MetadataCredentialProviderTest, InjectedMetadataServer) {
  auto expiry = absl::Now() + absl::Seconds(200);
  auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
      DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
                             kSessionToken, expiry));
  auto provider = std::make_shared<EC2MetadataCredentialProvider>(
      kCustomEndpoint, mock_transport);
  TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
  ASSERT_EQ(credentials.access_key, kAccessKey);
  ASSERT_EQ(credentials.secret_key, kSecretKey);
  ASSERT_EQ(credentials.session_token, kSessionToken);
  ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
// An empty security-credentials listing yields a NotFound-style error with an
// "Empty EC2 Role list" message.
// NOTE(review): the mock URL string literals below appear truncated by
// extraction ("POST http: etc.) — confirm against the original source.
TEST_F(EC2MetadataCredentialProviderTest, NoIamRolesInSecurityCredentials) {
  auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http:
       HttpResponse{200, absl::Cord{kApiToken}}},
      {"GET http:
       HttpResponse{
           200, absl::Cord{""}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
  };
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  ASSERT_FALSE(provider->GetCredentials());
  ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
  EXPECT_THAT(provider->GetCredentials().status().ToString(),
              ::testing::HasSubstr("Empty EC2 Role list"));
}
// A credentials document whose "Code" is not "Success" produces a NotFound
// error mentioning both the request and the returned code.
// NOTE(review): the mock URL string literals below appear truncated by
// extraction ("POST http: etc.) — confirm against the original source.
TEST_F(EC2MetadataCredentialProviderTest, UnsuccessfulJsonResponse) {
  auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
      {"POST http:
       HttpResponse{200, absl::Cord{kApiToken}}},
      {"GET http:
       HttpResponse{
           200, absl::Cord{"info"}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
      {"GET http:
       HttpResponse{200,
                    absl::Cord{"mock-iam-role"},
                    {{"x-aws-ec2-metadata-token", kApiToken}}}},
      {"GET "
       "http:
       "mock-iam-role",
       HttpResponse{200,
                    absl::Cord(R"({"Code": "EntirelyUnsuccessful"})"),
                    {{"x-aws-ec2-metadata-token", kApiToken}}}}};
  auto mock_transport =
      std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
  auto provider =
      std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
  auto credentials = provider->GetCredentials();
  EXPECT_THAT(credentials.status(), MatchesStatus(absl::StatusCode::kNotFound));
  EXPECT_THAT(credentials.status().ToString(),
              ::testing::AllOf(::testing::HasSubstr("EC2Metadata request"),
                               ::testing::HasSubstr("EntirelyUnsuccessful")));
}
} | std::string GetEC2MetadataServiceEndpoint() {
return GetFlagOrEnvValue(FLAGS_tensorstore_aws_ec2_metadata_service_endpoint,
"AWS_EC2_METADATA_SERVICE_ENDPOINT")
.value_or("http:
} | TEST_F(EC2MetadataCredentialProviderTest, CredentialRetrievalFlow) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kDefaultEndpoint, kApiToken, kAccessKey,
kSecretKey, kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, EnvironmentVariableMetadataServer) {
SetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", kCustomEndpoint);
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, InjectedMetadataServer) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider = std::make_shared<EC2MetadataCredentialProvider>(
kCustomEndpoint, mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, NoIamRolesInSecurityCredentials) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{""}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
ASSERT_FALSE(provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
EXPECT_THAT(provider->GetCredentials().status().ToString(),
::testing::HasSubstr("Empty EC2 Role list"));
}
TEST_F(EC2MetadataCredentialProviderTest, UnsuccessfulJsonResponse) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{"info"}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET http:
HttpResponse{200,
absl::Cord{"mock-iam-role"},
{{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET "
"http:
"mock-iam-role",
HttpResponse{200,
absl::Cord(R"({"Code": "EntirelyUnsuccessful"})"),
{{"x-aws-ec2-metadata-token", kApiToken}}}}};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
auto credentials = provider->GetCredentials();
EXPECT_THAT(credentials.status(), MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(credentials.status().ToString(),
::testing::AllOf(::testing::HasSubstr("EC2Metadata request"),
::testing::HasSubstr("EntirelyUnsuccessful")));
} |
#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
#define ABSL_NUMERIC_INTERNAL_BITS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(__GNUC__) && !defined(__clang__)
#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
#else
#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
#else
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CLZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CTZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace numeric_internal {
// Returns true iff `x` has exactly one bit set, i.e. is a power of two.
// Zero is not considered a power of two.
constexpr bool IsPowerOf2(unsigned int x) noexcept {
  // Clearing the lowest set bit leaves zero only for powers of two.
  return (x & (x - 1)) == 0 && x != 0;
}
// Rotates the bits of `x` right by `s` positions; a negative `s` rotates
// left. The shift count is reduced modulo the bit width, so any `s` is valid.
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
    T x, int s) noexcept {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
                "T must have a power-of-2 size");
  // Width is a power of two, so masking implements the modulo reduction.
  const int mask = std::numeric_limits<T>::digits - 1;
  const T low = static_cast<T>(x >> (s & mask));
  const T high = static_cast<T>(x << ((-s) & mask));
  return low | high;
}
// Rotates the bits of `x` left by `s` positions; a negative `s` rotates
// right. The shift count is reduced modulo the bit width, so any `s` is valid.
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
    T x, int s) noexcept {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
                "T must have a power-of-2 size");
  // Width is a power of two, so masking implements the modulo reduction.
  const int mask = std::numeric_limits<T>::digits - 1;
  const T high = static_cast<T>(x << (s & mask));
  const T low = static_cast<T>(x >> ((-s) & mask));
  return high | low;
}
// Returns the number of set bits in `x`.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount32(uint32_t x) noexcept {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
  static_assert(sizeof(unsigned int) == sizeof(x),
                "__builtin_popcount does not take 32-bit arg");
  return __builtin_popcount(x);
#else
  // Portable fallback: parallel bit-count — sum adjacent bit pairs, then
  // nibbles, then use a multiply to accumulate the byte sums into the top
  // byte before shifting it down.
  x -= ((x >> 1) & 0x55555555);
  x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
  return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
#endif
}
// Returns the number of set bits in `x`.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount64(uint64_t x) noexcept {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
  static_assert(sizeof(unsigned long long) == sizeof(x),
                "__builtin_popcount does not take 64-bit arg");
  return __builtin_popcountll(x);
#else
  // Portable fallback: 64-bit variant of the parallel bit-count used in
  // Popcount32; the final multiply gathers the byte sums into the top byte.
  x -= (x >> 1) & 0x5555555555555555ULL;
  x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
  return static_cast<int>(
      (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
#endif
}
// Returns the number of set bits in `x` for any unsigned type up to 64 bits
// wide, dispatching to the fixed-width helpers.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount(T x) noexcept {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
                "T must have a power-of-2 size");
  static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
  // Types no wider than 32 bits use the 32-bit routine; only a full 64-bit
  // argument needs Popcount64.
  if (sizeof(x) <= sizeof(uint32_t)) {
    return Popcount32(x);
  }
  return Popcount64(x);
}
// Returns the number of leading (most-significant) zero bits in `x`;
// returns 32 when `x` is zero.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes32(uint32_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
  static_assert(sizeof(unsigned int) == sizeof(x),
                "__builtin_clz does not take 32-bit arg");
  // __builtin_clz is undefined for 0, so handle that case explicitly.
  return x == 0 ? 32 : __builtin_clz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
  // MSVC: _BitScanReverse returns the index of the highest set bit, or
  // reports failure (return 0) when x == 0.
  unsigned long result = 0;
  if (_BitScanReverse(&result, x)) {
    return 31 - result;
  }
  return 32;
#else
  // Portable fallback: binary search for the highest set bit, then a
  // string-literal lookup table for the final 4 bits.
  int zeroes = 28;
  if (x >> 16) {
    zeroes -= 16;
    x >>= 16;
  }
  if (x >> 8) {
    zeroes -= 8;
    x >>= 8;
  }
  if (x >> 4) {
    zeroes -= 4;
    x >>= 4;
  }
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
// Returns the number of leading zero bits in a 16-bit value; returns 16 when
// `x` is zero.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clzg)
  // Generic builtin (newer compilers) works directly on the 16-bit type.
  return x == 0 ? 16 : __builtin_clzg(x);
#elif ABSL_HAVE_BUILTIN(__builtin_clzs)
  static_assert(sizeof(unsigned short) == sizeof(x),
                "__builtin_clzs does not take 16-bit arg");
  return x == 0 ? 16 : __builtin_clzs(x);
#else
  // Fallback: widen to 32 bits and discard the 16 extra leading zeroes.
  return CountLeadingZeroes32(x) - 16;
#endif
}
// Returns the number of leading zero bits in a 64-bit value; returns 64 when
// `x` is zero.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes64(uint64_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
  static_assert(sizeof(unsigned long long) == sizeof(x),
                "__builtin_clzll does not take 64-bit arg");
  // __builtin_clzll is undefined for 0, so handle that case explicitly.
  return x == 0 ? 64 : __builtin_clzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && \
    (defined(_M_X64) || defined(_M_ARM64))
  // 64-bit MSVC targets have a native 64-bit bit scan.
  unsigned long result = 0;
  if (_BitScanReverse64(&result, x)) {
    return 63 - result;
  }
  return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
  // 32-bit MSVC: scan the high half first, then the low half.
  unsigned long result = 0;
  if ((x >> 32) &&
      _BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
    return 31 - result;
  }
  if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
    return 63 - result;
  }
  return 64;
#else
  // Portable fallback: binary search for the highest set bit, then a
  // string-literal lookup table for the final 4 bits.
  int zeroes = 60;
  if (x >> 32) {
    zeroes -= 32;
    x >>= 32;
  }
  if (x >> 16) {
    zeroes -= 16;
    x >>= 16;
  }
  if (x >> 8) {
    zeroes -= 8;
    x >>= 8;
  }
  if (x >> 4) {
    zeroes -= 4;
    x >>= 4;
  }
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
// Returns the number of leading zero bits in `x` for any unsigned type up to
// 64 bits wide; returns the type's digit count when `x` is zero.
template <typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes(T x) {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
                "T must have a power-of-2 size");
  static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
  // Narrow types are widened before counting, so subtract the extra leading
  // zeroes the widening introduced.
  if (sizeof(T) <= sizeof(uint16_t)) {
    return CountLeadingZeroes16(static_cast<uint16_t>(x)) -
           (std::numeric_limits<uint16_t>::digits -
            std::numeric_limits<T>::digits);
  }
  if (sizeof(T) <= sizeof(uint32_t)) {
    return CountLeadingZeroes32(static_cast<uint32_t>(x)) -
           (std::numeric_limits<uint32_t>::digits -
            std::numeric_limits<T>::digits);
  }
  return CountLeadingZeroes64(x);
}
// Returns the number of trailing (least-significant) zero bits in `x`.
// Precondition: `x` is nonzero (the builtin branch is undefined for 0).
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero32(uint32_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
  static_assert(sizeof(unsigned int) == sizeof(x),
                "__builtin_ctz does not take 32-bit arg");
  return __builtin_ctz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
  unsigned long result = 0;
  _BitScanForward(&result, x);
  return result;
#else
  // Portable fallback: isolate the lowest set bit (x & -x written as
  // x & (~x + 1)), then locate it with masked decrements.
  int c = 31;
  x &= ~x + 1;
  if (x & 0x0000FFFF) c -= 16;
  if (x & 0x00FF00FF) c -= 8;
  if (x & 0x0F0F0F0F) c -= 4;
  if (x & 0x33333333) c -= 2;
  if (x & 0x55555555) c -= 1;
  return c;
#endif
}
// Returns the number of trailing zero bits in `x`.
// Precondition: `x` is nonzero (the builtin branch is undefined for 0).
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero64(uint64_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
  static_assert(sizeof(unsigned long long) == sizeof(x),
                "__builtin_ctzll does not take 64-bit arg");
  return __builtin_ctzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && \
    (defined(_M_X64) || defined(_M_ARM64))
  // 64-bit MSVC targets have a native 64-bit bit scan.
  unsigned long result = 0;
  _BitScanForward64(&result, x);
  return result;
#elif defined(_MSC_VER) && !defined(__clang__)
  // 32-bit MSVC: if the low half is zero, scan the high half and offset.
  unsigned long result = 0;
  if (static_cast<uint32_t>(x) == 0) {
    _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
    return result + 32;
  }
  _BitScanForward(&result, static_cast<unsigned long>(x));
  return result;
#else
  // Portable fallback: isolate the lowest set bit, then locate it with
  // masked decrements (64-bit variant of the 32-bit routine).
  int c = 63;
  x &= ~x + 1;
  if (x & 0x00000000FFFFFFFF) c -= 32;
  if (x & 0x0000FFFF0000FFFF) c -= 16;
  if (x & 0x00FF00FF00FF00FF) c -= 8;
  if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
  if (x & 0x3333333333333333) c -= 2;
  if (x & 0x5555555555555555) c -= 1;
  return c;
#endif
}
// Returns the number of trailing zero bits in a 16-bit value.
// Precondition: `x` is nonzero.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_ctzg)
  // Generic builtin (newer compilers) works directly on the 16-bit type.
  return __builtin_ctzg(x);
#elif ABSL_HAVE_BUILTIN(__builtin_ctzs)
  static_assert(sizeof(unsigned short) == sizeof(x),
                "__builtin_ctzs does not take 16-bit arg");
  return __builtin_ctzs(x);
#else
  // Widening does not change the trailing-zero count of a nonzero value.
  return CountTrailingZeroesNonzero32(x);
#endif
}
// Returns the number of trailing zero bits in `x` for any unsigned type up to
// 64 bits wide; returns the type's digit count when `x` is zero.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroes(T x) noexcept {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
                "T must have a power-of-2 size");
  static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
  // The Nonzero helpers require x != 0, so guard that case up front.
  if (x == 0) {
    return std::numeric_limits<T>::digits;
  }
  if (sizeof(T) <= sizeof(uint16_t)) {
    return CountTrailingZeroesNonzero16(static_cast<uint16_t>(x));
  }
  if (sizeof(T) <= sizeof(uint32_t)) {
    return CountTrailingZeroesNonzero32(static_cast<uint32_t>(x));
  }
  return CountTrailingZeroesNonzero64(x);
}
// Helper for BitCeilNonPowerOf2: computes T{1} << x as a T value.
// `promotion` is the number of extra bits integral promotion adds for
// sub-`unsigned` types; shifting left by (x + promotion) and back right by
// `promotion` keeps the intermediate shift within the promoted type's range.
// NOTE(review): correctness depends on the promoted-width arithmetic staying
// in range for the callers' inputs — confirm against BitCeilNonPowerOf2.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
    typename std::enable_if<std::is_unsigned<T>::value, T>::type
    BitCeilPromotionHelper(T x, T promotion) {
  return (T{1} << (x + promotion)) >> promotion;
}
// Returns the smallest power of two not less than `x`, for `x` that is not
// already a power of two. Computes the bit width of `x` (digits minus leading
// zeroes) and shifts 1 up to it, routing through BitCeilPromotionHelper to
// compensate for integral promotion of types narrower than `unsigned`.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
    typename std::enable_if<std::is_unsigned<T>::value, T>::type
    BitCeilNonPowerOf2(T x) {
  return BitCeilPromotionHelper(
      static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
      T{sizeof(T) >= sizeof(unsigned) ? 0
                                      : std::numeric_limits<unsigned>::digits -
                                            std::numeric_limits<T>::digits});
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/numeric/bits.h"
#include <limits>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/random.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
// Typed-test fixture instantiated once per one-byte unsigned integer type.
template <typename IntT>
class IntegerTypesTest : public ::testing::Test {};
// The set of one-byte unsigned types exercised by IntegerTypesTest.
using OneByteIntegerTypes = ::testing::Types<
    unsigned char,
    uint8_t
    >;
TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes);
// Smoke-tests every <bit>-style function on one-byte unsigned types, both at
// runtime and (where the implementation supports it) at compile time.
TYPED_TEST(IntegerTypesTest, HandlesTypes) {
  using UIntType = TypeParam;
  EXPECT_EQ(rotl(UIntType{0x12}, 0), uint8_t{0x12});
  EXPECT_EQ(rotr(UIntType{0x12}, -4), uint8_t{0x21});
  static_assert(rotl(UIntType{0x12}, 0) == uint8_t{0x12}, "");
  static_assert(rotr(UIntType{0x12}, 0) == uint8_t{0x12}, "");
  EXPECT_EQ(rotr(UIntType{0x12}, 0), uint8_t{0x12});
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
  // Compile-time checks are only possible when the builtins are constexpr.
  static_assert(countl_zero(UIntType{}) == 8, "");
  static_assert(countl_zero(static_cast<UIntType>(-1)) == 0, "");
  static_assert(countl_one(UIntType{}) == 0, "");
  static_assert(countl_one(static_cast<UIntType>(-1)) == 8, "");
  static_assert(countr_zero(UIntType{}) == 8, "");
  static_assert(countr_zero(static_cast<UIntType>(-1)) == 0, "");
  static_assert(countr_one(UIntType{}) == 0, "");
  static_assert(countr_one(static_cast<UIntType>(-1)) == 8, "");
  static_assert(popcount(UIntType{}) == 0, "");
  static_assert(popcount(UIntType{1}) == 1, "");
  static_assert(popcount(static_cast<UIntType>(-1)) == 8, "");
  static_assert(bit_width(UIntType{}) == 0, "");
  static_assert(bit_width(UIntType{1}) == 1, "");
  static_assert(bit_width(UIntType{3}) == 2, "");
  static_assert(bit_width(static_cast<UIntType>(-1)) == 8, "");
#endif
  EXPECT_EQ(countl_zero(UIntType{}), 8);
  EXPECT_EQ(countl_zero(static_cast<UIntType>(-1)), 0);
  EXPECT_EQ(countl_one(UIntType{}), 0);
  EXPECT_EQ(countl_one(static_cast<UIntType>(-1)), 8);
  EXPECT_EQ(countr_zero(UIntType{}), 8);
  EXPECT_EQ(countr_zero(static_cast<UIntType>(-1)), 0);
  EXPECT_EQ(countr_one(UIntType{}), 0);
  EXPECT_EQ(countr_one(static_cast<UIntType>(-1)), 8);
  EXPECT_EQ(popcount(UIntType{}), 0);
  EXPECT_EQ(popcount(UIntType{1}), 1);
  EXPECT_FALSE(has_single_bit(UIntType{}));
  EXPECT_FALSE(has_single_bit(static_cast<UIntType>(-1)));
  EXPECT_EQ(bit_width(UIntType{}), 0);
  EXPECT_EQ(bit_width(UIntType{1}), 1);
  EXPECT_EQ(bit_width(UIntType{3}), 2);
  EXPECT_EQ(bit_width(static_cast<UIntType>(-1)), 8);
}
// Verifies rotl for all four fixed widths: identity at shift 0, full-width
// and negative-full-width shifts, and nibble rotations in both directions.
TEST(Rotate, Left) {
  static_assert(rotl(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
  static_assert(rotl(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
  static_assert(rotl(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
  static_assert(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
                    uint64_t{0x12345678ABCDEF01ULL},
                "");
  EXPECT_EQ(rotl(uint8_t{0x12}, 0), uint8_t{0x12});
  EXPECT_EQ(rotl(uint16_t{0x1234}, 0), uint16_t{0x1234});
  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0),
            uint64_t{0x12345678ABCDEF01ULL});
  // Rotating by the full bit width is the identity.
  EXPECT_EQ(rotl(uint8_t{0x12}, 8), uint8_t{0x12});
  EXPECT_EQ(rotl(uint16_t{0x1234}, 16), uint16_t{0x1234});
  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 64),
            uint64_t{0x12345678ABCDEF01ULL});
  EXPECT_EQ(rotl(uint8_t{0x12}, -8), uint8_t{0x12});
  EXPECT_EQ(rotl(uint16_t{0x1234}, -16), uint16_t{0x1234});
  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -64),
            uint64_t{0x12345678ABCDEF01ULL});
  // Nibble rotations; negative counts rotate the opposite direction.
  EXPECT_EQ(rotl(uint8_t{0x12}, 4), uint8_t{0x21});
  EXPECT_EQ(rotl(uint16_t{0x1234}, 4), uint16_t{0x2341});
  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 4), uint32_t{0x23456781UL});
  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 4),
            uint64_t{0x2345678ABCDEF011ULL});
  EXPECT_EQ(rotl(uint8_t{0x12}, -4), uint8_t{0x21});
  EXPECT_EQ(rotl(uint16_t{0x1234}, -4), uint16_t{0x4123});
  EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -4), uint32_t{0x81234567UL});
  EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -4),
            uint64_t{0x112345678ABCDEF0ULL});
}
// Verifies rotr for all four fixed widths: identity at shift 0, full-width
// and negative-full-width shifts, and nibble rotations in both directions.
TEST(Rotate, Right) {
  static_assert(rotr(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
  static_assert(rotr(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
  static_assert(rotr(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
  static_assert(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
                    uint64_t{0x12345678ABCDEF01ULL},
                "");
  EXPECT_EQ(rotr(uint8_t{0x12}, 0), uint8_t{0x12});
  EXPECT_EQ(rotr(uint16_t{0x1234}, 0), uint16_t{0x1234});
  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0),
            uint64_t{0x12345678ABCDEF01ULL});
  // Rotating by the full bit width is the identity.
  EXPECT_EQ(rotr(uint8_t{0x12}, 8), uint8_t{0x12});
  EXPECT_EQ(rotr(uint16_t{0x1234}, 16), uint16_t{0x1234});
  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 64),
            uint64_t{0x12345678ABCDEF01ULL});
  EXPECT_EQ(rotr(uint8_t{0x12}, -8), uint8_t{0x12});
  EXPECT_EQ(rotr(uint16_t{0x1234}, -16), uint16_t{0x1234});
  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -64),
            uint64_t{0x12345678ABCDEF01ULL});
  // Nibble rotations; negative counts rotate the opposite direction.
  EXPECT_EQ(rotr(uint8_t{0x12}, 4), uint8_t{0x21});
  EXPECT_EQ(rotr(uint16_t{0x1234}, 4), uint16_t{0x4123});
  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 4), uint32_t{0x81234567UL});
  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 4),
            uint64_t{0x112345678ABCDEF0ULL});
  EXPECT_EQ(rotr(uint8_t{0x12}, -4), uint8_t{0x21});
  EXPECT_EQ(rotr(uint16_t{0x1234}, -4), uint16_t{0x2341});
  EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -4), uint32_t{0x23456781UL});
  EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -4),
            uint64_t{0x2345678ABCDEF011ULL});
}
// Property test: for random values and shifts (including negative and
// out-of-range shifts), rotl(v, s) == rotr(v, -s) at every width.
TEST(Rotate, Symmetry) {
  absl::BitGen rng;
  constexpr int kTrials = 100;
  for (int i = 0; i < kTrials; ++i) {
    uint8_t value = absl::Uniform(rng, std::numeric_limits<uint8_t>::min(),
                                  std::numeric_limits<uint8_t>::max());
    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint8_t>::digits,
                              2 * std::numeric_limits<uint8_t>::digits);
    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
  }
  for (int i = 0; i < kTrials; ++i) {
    uint16_t value = absl::Uniform(rng, std::numeric_limits<uint16_t>::min(),
                                   std::numeric_limits<uint16_t>::max());
    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint16_t>::digits,
                              2 * std::numeric_limits<uint16_t>::digits);
    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
  }
  for (int i = 0; i < kTrials; ++i) {
    uint32_t value = absl::Uniform(rng, std::numeric_limits<uint32_t>::min(),
                                   std::numeric_limits<uint32_t>::max());
    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint32_t>::digits,
                              2 * std::numeric_limits<uint32_t>::digits);
    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
  }
  for (int i = 0; i < kTrials; ++i) {
    uint64_t value = absl::Uniform(rng, std::numeric_limits<uint64_t>::min(),
                                   std::numeric_limits<uint64_t>::max());
    int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint64_t>::digits,
                              2 * std::numeric_limits<uint64_t>::digits);
    EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
  }
}
// Verifies countl_zero at every width: zero input, all-ones input, and every
// single-bit value; compile-time checks where the builtins are constexpr.
TEST(Counting, LeadingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
  static_assert(countl_zero(uint8_t{}) == 8, "");
  static_assert(countl_zero(static_cast<uint8_t>(-1)) == 0, "");
  static_assert(countl_zero(uint16_t{}) == 16, "");
  static_assert(countl_zero(static_cast<uint16_t>(-1)) == 0, "");
  static_assert(countl_zero(uint32_t{}) == 32, "");
  static_assert(countl_zero(~uint32_t{}) == 0, "");
  static_assert(countl_zero(uint64_t{}) == 64, "");
  static_assert(countl_zero(~uint64_t{}) == 0, "");
#endif
  EXPECT_EQ(countl_zero(uint8_t{}), 8);
  EXPECT_EQ(countl_zero(static_cast<uint8_t>(-1)), 0);
  EXPECT_EQ(countl_zero(uint16_t{}), 16);
  EXPECT_EQ(countl_zero(static_cast<uint16_t>(-1)), 0);
  EXPECT_EQ(countl_zero(uint32_t{}), 32);
  EXPECT_EQ(countl_zero(~uint32_t{}), 0);
  EXPECT_EQ(countl_zero(uint64_t{}), 64);
  EXPECT_EQ(countl_zero(~uint64_t{}), 0);
  // Exhaustive single-bit checks per width.
  for (int i = 0; i < 8; i++) {
    EXPECT_EQ(countl_zero(static_cast<uint8_t>(1u << i)), 7 - i);
  }
  for (int i = 0; i < 16; i++) {
    EXPECT_EQ(countl_zero(static_cast<uint16_t>(1u << i)), 15 - i);
  }
  for (int i = 0; i < 32; i++) {
    EXPECT_EQ(countl_zero(uint32_t{1} << i), 31 - i);
  }
  for (int i = 0; i < 64; i++) {
    EXPECT_EQ(countl_zero(uint64_t{1} << i), 63 - i);
  }
}
// Verifies countl_one at every width for the zero and all-ones extremes,
// with compile-time checks where the builtins are constexpr.
TEST(Counting, LeadingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
  static_assert(countl_one(uint8_t{}) == 0, "");
  static_assert(countl_one(static_cast<uint8_t>(-1)) == 8, "");
  static_assert(countl_one(uint16_t{}) == 0, "");
  static_assert(countl_one(static_cast<uint16_t>(-1)) == 16, "");
  static_assert(countl_one(uint32_t{}) == 0, "");
  static_assert(countl_one(~uint32_t{}) == 32, "");
  static_assert(countl_one(uint64_t{}) == 0, "");
  static_assert(countl_one(~uint64_t{}) == 64, "");
#endif
  EXPECT_EQ(countl_one(uint8_t{}), 0);
  EXPECT_EQ(countl_one(static_cast<uint8_t>(-1)), 8);
  EXPECT_EQ(countl_one(uint16_t{}), 0);
  EXPECT_EQ(countl_one(static_cast<uint16_t>(-1)), 16);
  EXPECT_EQ(countl_one(uint32_t{}), 0);
  EXPECT_EQ(countl_one(~uint32_t{}), 32);
  EXPECT_EQ(countl_one(uint64_t{}), 0);
  EXPECT_EQ(countl_one(~uint64_t{}), 64);
}
// Verifies countr_zero at every width for the zero and all-ones extremes,
// with compile-time checks where the builtins are constexpr.
TEST(Counting, TrailingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
  static_assert(countr_zero(uint8_t{}) == 8, "");
  static_assert(countr_zero(static_cast<uint8_t>(-1)) == 0, "");
  static_assert(countr_zero(uint16_t{}) == 16, "");
  static_assert(countr_zero(static_cast<uint16_t>(-1)) == 0, "");
  static_assert(countr_zero(uint32_t{}) == 32, "");
  static_assert(countr_zero(~uint32_t{}) == 0, "");
  static_assert(countr_zero(uint64_t{}) == 64, "");
  static_assert(countr_zero(~uint64_t{}) == 0, "");
#endif
  EXPECT_EQ(countr_zero(uint8_t{}), 8);
  EXPECT_EQ(countr_zero(static_cast<uint8_t>(-1)), 0);
  EXPECT_EQ(countr_zero(uint16_t{}), 16);
  EXPECT_EQ(countr_zero(static_cast<uint16_t>(-1)), 0);
  EXPECT_EQ(countr_zero(uint32_t{}), 32);
  EXPECT_EQ(countr_zero(~uint32_t{}), 0);
  EXPECT_EQ(countr_zero(uint64_t{}), 64);
  EXPECT_EQ(countr_zero(~uint64_t{}), 0);
}
// Verifies countr_one at every width for the zero and all-ones extremes,
// with compile-time checks where the builtins are constexpr.
TEST(Counting, TrailingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
  static_assert(countr_one(uint8_t{}) == 0, "");
  static_assert(countr_one(static_cast<uint8_t>(-1)) == 8, "");
  static_assert(countr_one(uint16_t{}) == 0, "");
  static_assert(countr_one(static_cast<uint16_t>(-1)) == 16, "");
  static_assert(countr_one(uint32_t{}) == 0, "");
  static_assert(countr_one(~uint32_t{}) == 32, "");
  static_assert(countr_one(uint64_t{}) == 0, "");
  static_assert(countr_one(~uint64_t{}) == 64, "");
#endif
  EXPECT_EQ(countr_one(uint8_t{}), 0);
  EXPECT_EQ(countr_one(static_cast<uint8_t>(-1)), 8);
  EXPECT_EQ(countr_one(uint16_t{}), 0);
  EXPECT_EQ(countr_one(static_cast<uint16_t>(-1)), 16);
  EXPECT_EQ(countr_one(uint32_t{}), 0);
  EXPECT_EQ(countr_one(~uint32_t{}), 32);
  EXPECT_EQ(countr_one(uint64_t{}), 0);
  EXPECT_EQ(countr_one(~uint64_t{}), 64);
}
// Verifies popcount (number of set bits) on boundary values of every unsigned
// width, then sweeps single-bit and all-but-one-bit patterns across each type.
TEST(Counting, Popcount) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
static_assert(popcount(uint8_t{}) == 0, "");
static_assert(popcount(uint8_t{1}) == 1, "");
static_assert(popcount(static_cast<uint8_t>(-1)) == 8, "");
static_assert(popcount(uint16_t{}) == 0, "");
static_assert(popcount(uint16_t{1}) == 1, "");
static_assert(popcount(static_cast<uint16_t>(-1)) == 16, "");
static_assert(popcount(uint32_t{}) == 0, "");
static_assert(popcount(uint32_t{1}) == 1, "");
static_assert(popcount(~uint32_t{}) == 32, "");
static_assert(popcount(uint64_t{}) == 0, "");
static_assert(popcount(uint64_t{1}) == 1, "");
static_assert(popcount(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(popcount(uint8_t{}), 0);
EXPECT_EQ(popcount(uint8_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(popcount(uint16_t{}), 0);
EXPECT_EQ(popcount(uint16_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(popcount(uint32_t{}), 0);
EXPECT_EQ(popcount(uint32_t{1}), 1);
EXPECT_EQ(popcount(~uint32_t{}), 32);
EXPECT_EQ(popcount(uint64_t{}), 0);
EXPECT_EQ(popcount(uint64_t{1}), 1);
EXPECT_EQ(popcount(~uint64_t{}), 64);
// Exactly one bit set -> 1; all bits except one set -> width - 1.
for (int i = 0; i < 8; i++) {
EXPECT_EQ(popcount(static_cast<uint8_t>(uint8_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(static_cast<uint8_t>(-1) ^
(uint8_t{1} << i))),
7);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(popcount(static_cast<uint16_t>(uint16_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(static_cast<uint16_t>(-1) ^
(uint16_t{1} << i))),
15);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(popcount(uint32_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint32_t>(-1) ^ (uint32_t{1} << i)), 31);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(popcount(uint64_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint64_t>(-1) ^ (uint64_t{1} << i)), 63);
}
}
// A randomly generated popcount test case: `value` is the input and
// `expected` is the number of bits that were set while building it.
template <typename T>
struct PopcountInput {
T value = 0;
int expected = 0;  // ground-truth popcount of `value`
};
// Builds a random PopcountInput<T>: each of the T::digits bit positions is
// set independently with probability 0.2, and `expected` tracks how many
// bits were actually set.
template <typename T>
PopcountInput<T> GeneratePopcountInput(absl::BitGen& gen) {
  PopcountInput<T> result;
  for (int bit = 0; bit < std::numeric_limits<T>::digits; bit++) {
    if (absl::Bernoulli(gen, 0.2)) {
      result.value |= T{1} << bit;
      result.expected++;
    }
  }
  return result;
}
// Randomized cross-check: popcount must agree with the bit count recorded
// while the random input was constructed, for each unsigned width.
TEST(Counting, PopcountFuzz) {
absl::BitGen rng;
constexpr int kTrials = 100;
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint8_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint16_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint32_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint64_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
}
// Verifies has_single_bit (exact power-of-two test): false for 0 and for
// all-ones values, true only when exactly one bit is set.
TEST(IntegralPowersOfTwo, SingleBit) {
EXPECT_FALSE(has_single_bit(uint8_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint8_t>(-1)));
EXPECT_FALSE(has_single_bit(uint16_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint16_t>(-1)));
EXPECT_FALSE(has_single_bit(uint32_t{}));
EXPECT_FALSE(has_single_bit(~uint32_t{}));
EXPECT_FALSE(has_single_bit(uint64_t{}));
EXPECT_FALSE(has_single_bit(~uint64_t{}));
// has_single_bit is usable in constant expressions.
static_assert(!has_single_bit(0u), "");
static_assert(has_single_bit(1u), "");
static_assert(has_single_bit(2u), "");
static_assert(!has_single_bit(3u), "");
static_assert(has_single_bit(4u), "");
static_assert(!has_single_bit(1337u), "");
static_assert(has_single_bit(65536u), "");
static_assert(has_single_bit(uint32_t{1} << 30), "");
static_assert(has_single_bit(uint64_t{1} << 42), "");
EXPECT_FALSE(has_single_bit(0u));
EXPECT_TRUE(has_single_bit(1u));
EXPECT_TRUE(has_single_bit(2u));
EXPECT_FALSE(has_single_bit(3u));
EXPECT_TRUE(has_single_bit(4u));
EXPECT_FALSE(has_single_bit(1337u));
EXPECT_TRUE(has_single_bit(65536u));
EXPECT_TRUE(has_single_bit(uint32_t{1} << 30));
EXPECT_TRUE(has_single_bit(uint64_t{1} << 42));
// max()/2 + 1 is the highest bit alone, i.e. the largest power of two.
EXPECT_TRUE(has_single_bit(
static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint16_t>(std::numeric_limits<uint16_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint32_t>(std::numeric_limits<uint32_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint64_t>(std::numeric_limits<uint64_t>::max() / 2 + 1)));
}
// SFINAE probe: the first overload is viable only if bit_ceil(arg) is a valid
// constant expression (it is used as a default template argument). Calling
// with an int literal prefers that overload; otherwise the char overload is
// chosen. Thus the return value reports whether bit_ceil(arg) is constexpr-
// evaluable — bit_ceil of a value whose ceiling would overflow is not.
template <typename T, T arg, T = bit_ceil(arg)>
bool IsBitCeilConstantExpression(int) {
return true;
}
// Fallback overload selected when the default argument above is ill-formed.
template <typename T, T arg>
bool IsBitCeilConstantExpression(char) {
return false;
}
// Verifies bit_ceil (smallest power of two >= the input; 1 for input 0), and
// that bit_ceil is a constant expression exactly when the result does not
// overflow the type (checked via the IsBitCeilConstantExpression probe).
TEST(IntegralPowersOfTwo, Ceiling) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_ceil(0u) == 1, "");
static_assert(bit_ceil(1u) == 1, "");
static_assert(bit_ceil(2u) == 2, "");
static_assert(bit_ceil(3u) == 4, "");
static_assert(bit_ceil(4u) == 4, "");
static_assert(bit_ceil(1337u) == 2048, "");
static_assert(bit_ceil(65536u) == 65536, "");
static_assert(bit_ceil(65536u - 1337u) == 65536, "");
static_assert(bit_ceil(uint32_t{0x80000000}) == uint32_t{0x80000000}, "");
static_assert(bit_ceil(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
static_assert(
bit_ceil(uint64_t{0x8000000000000000}) == uint64_t{0x8000000000000000},
"");
// Values above the top power of two have no representable ceiling, so
// bit_ceil of them must NOT be a constant expression (UB would otherwise
// be silently accepted).
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x80}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x81}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0xff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8000}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8001}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0xffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0xffffffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint64_t, uint64_t{0x0}>(0)));
EXPECT_TRUE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0xffffffffffffffff}>(0)));
#endif
EXPECT_EQ(bit_ceil(0u), 1);
EXPECT_EQ(bit_ceil(1u), 1);
EXPECT_EQ(bit_ceil(2u), 2);
EXPECT_EQ(bit_ceil(3u), 4);
EXPECT_EQ(bit_ceil(4u), 4);
EXPECT_EQ(bit_ceil(1337u), 2048);
EXPECT_EQ(bit_ceil(65536u), 65536);
EXPECT_EQ(bit_ceil(65536u - 1337u), 65536);
EXPECT_EQ(bit_ceil(uint64_t{0x40000000000}), uint64_t{0x40000000000});
}
// Verifies bit_floor (largest power of two <= the input; 0 for input 0) on
// fixed values and on every power of two (and power-of-two + 1) per width.
TEST(IntegralPowersOfTwo, Floor) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_floor(0u) == 0, "");
static_assert(bit_floor(1u) == 1, "");
static_assert(bit_floor(2u) == 2, "");
static_assert(bit_floor(3u) == 2, "");
static_assert(bit_floor(4u) == 4, "");
static_assert(bit_floor(1337u) == 1024, "");
static_assert(bit_floor(65536u) == 65536, "");
static_assert(bit_floor(65536u - 1337u) == 32768, "");
static_assert(bit_floor(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
#endif
EXPECT_EQ(bit_floor(0u), 0);
EXPECT_EQ(bit_floor(1u), 1);
EXPECT_EQ(bit_floor(2u), 2);
EXPECT_EQ(bit_floor(3u), 2);
EXPECT_EQ(bit_floor(4u), 4);
EXPECT_EQ(bit_floor(1337u), 1024);
EXPECT_EQ(bit_floor(65536u), 65536);
EXPECT_EQ(bit_floor(65536u - 1337u), 32768);
EXPECT_EQ(bit_floor(uint64_t{0x40000000000}), uint64_t{0x40000000000});
// A power of two is its own floor; one more than it floors back down
// (skipping i == 0 because 1 + 1 == 2 is itself a power of two).
for (int i = 0; i < 8; i++) {
uint8_t input = uint8_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint8_t>(input + 1)), input);
}
}
for (int i = 0; i < 16; i++) {
uint16_t input = uint16_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint16_t>(input + 1)), input);
}
}
for (int i = 0; i < 32; i++) {
uint32_t input = uint32_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
for (int i = 0; i < 64; i++) {
uint64_t input = uint64_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
}
// Verifies bit_width (number of bits needed to represent the value; 0 for 0):
// fixed values per width plus a sweep over single-bit inputs, where the width
// of 1 << i is i + 1.
TEST(IntegralPowersOfTwo, Width) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_width(uint8_t{}) == 0, "");
static_assert(bit_width(uint8_t{1}) == 1, "");
static_assert(bit_width(uint8_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint8_t>(-1)) == 8, "");
static_assert(bit_width(uint16_t{}) == 0, "");
static_assert(bit_width(uint16_t{1}) == 1, "");
static_assert(bit_width(uint16_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint16_t>(-1)) == 16, "");
static_assert(bit_width(uint32_t{}) == 0, "");
static_assert(bit_width(uint32_t{1}) == 1, "");
static_assert(bit_width(uint32_t{3}) == 2, "");
static_assert(bit_width(~uint32_t{}) == 32, "");
static_assert(bit_width(uint64_t{}) == 0, "");
static_assert(bit_width(uint64_t{1}) == 1, "");
static_assert(bit_width(uint64_t{3}) == 2, "");
static_assert(bit_width(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(bit_width(uint8_t{}), 0);
EXPECT_EQ(bit_width(uint8_t{1}), 1);
EXPECT_EQ(bit_width(uint8_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(bit_width(uint16_t{}), 0);
EXPECT_EQ(bit_width(uint16_t{1}), 1);
EXPECT_EQ(bit_width(uint16_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(bit_width(uint32_t{}), 0);
EXPECT_EQ(bit_width(uint32_t{1}), 1);
EXPECT_EQ(bit_width(uint32_t{3}), 2);
EXPECT_EQ(bit_width(~uint32_t{}), 32);
EXPECT_EQ(bit_width(uint64_t{}), 0);
EXPECT_EQ(bit_width(uint64_t{1}), 1);
EXPECT_EQ(bit_width(uint64_t{3}), 2);
EXPECT_EQ(bit_width(~uint64_t{}), 64);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(bit_width(static_cast<uint8_t>(uint8_t{1} << i)), i + 1);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(bit_width(static_cast<uint16_t>(uint16_t{1} << i)), i + 1);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(bit_width(uint32_t{1} << i), i + 1);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(bit_width(uint64_t{1} << i), i + 1);
}
}
// GCC-compatible compilers provide constexpr popcount/clz/ctz builtins, so
// the constexpr code paths above must have been compiled in.
#if defined(__GNUC__)
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT,
"popcount should be constexpr");
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CLZ, "clz should be constexpr");
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CTZ, "ctz should be constexpr");
#endif
}
ABSL_NAMESPACE_END
} | template <typename T>
// NOTE(review): the "} |" above is a dataset-extraction artifact fusing two
// file columns on one line; the code below is the generic leading-zero
// counter. It dispatches on sizeof(T) to the narrowest native helper and
// subtracts the width difference so the count is relative to T's own width.
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes(T x) {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
return sizeof(T) <= sizeof(uint16_t)
? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
(std::numeric_limits<uint16_t>::digits -
std::numeric_limits<T>::digits)
: (sizeof(T) <= sizeof(uint32_t)
? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
(std::numeric_limits<uint32_t>::digits -
std::numeric_limits<T>::digits)
: CountLeadingZeroes64(x));
} | TEST(Counting, LeadingZeroes) {
// Body of TEST(Counting, LeadingZeroes) (its opening line is fused with the
// previous column): countl_zero of 0 is the full width, of all-ones is 0,
// and of 1 << i is width - 1 - i.
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_zero(uint8_t{}) == 8, "");
static_assert(countl_zero(static_cast<uint8_t>(-1)) == 0, "");
static_assert(countl_zero(uint16_t{}) == 16, "");
static_assert(countl_zero(static_cast<uint16_t>(-1)) == 0, "");
static_assert(countl_zero(uint32_t{}) == 32, "");
static_assert(countl_zero(~uint32_t{}) == 0, "");
static_assert(countl_zero(uint64_t{}) == 64, "");
static_assert(countl_zero(~uint64_t{}) == 0, "");
#endif
EXPECT_EQ(countl_zero(uint8_t{}), 8);
EXPECT_EQ(countl_zero(static_cast<uint8_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint16_t{}), 16);
EXPECT_EQ(countl_zero(static_cast<uint16_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint32_t{}), 32);
EXPECT_EQ(countl_zero(~uint32_t{}), 0);
EXPECT_EQ(countl_zero(uint64_t{}), 64);
EXPECT_EQ(countl_zero(~uint64_t{}), 0);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(countl_zero(static_cast<uint8_t>(1u << i)), 7 - i);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(countl_zero(static_cast<uint16_t>(1u << i)), 15 - i);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(countl_zero(uint32_t{1} << i), 31 - i);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(countl_zero(uint64_t{1} << i), 63 - i);
}
}
// Verifies countl_one (number of consecutive one bits starting at the most
// significant bit): 0 for zero input, full width for an all-ones input.
TEST(Counting, LeadingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_one(uint8_t{}) == 0, "");
static_assert(countl_one(static_cast<uint8_t>(-1)) == 8, "");
static_assert(countl_one(uint16_t{}) == 0, "");
static_assert(countl_one(static_cast<uint16_t>(-1)) == 16, "");
static_assert(countl_one(uint32_t{}) == 0, "");
static_assert(countl_one(~uint32_t{}) == 32, "");
static_assert(countl_one(uint64_t{}) == 0, "");
static_assert(countl_one(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(countl_one(uint8_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(countl_one(uint16_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(countl_one(uint32_t{}), 0);
EXPECT_EQ(countl_one(~uint32_t{}), 32);
EXPECT_EQ(countl_one(uint64_t{}), 0);
EXPECT_EQ(countl_one(~uint64_t{}), 64);
}
#include "utils/cycle_detector.h"
#include <algorithm>
#include <optional>
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
namespace mlir {
namespace {
// Unordered node-id set (membership tests only).
using NodeSet = llvm::DenseSet<int32_t>;
// Insertion-ordered node-id set; used for adjacency lists so iteration order
// is deterministic.
using OrderedNodeSet = OrderedSet<int32_t>;
// Small-vector alias with an inline capacity of 4.
template <typename T>
struct VecStruct {
using type = llvm::SmallVector<T, 4>;
};
template <typename T>
using Vec = typename VecStruct<T>::type;
// Per-node bookkeeping for the incremental topological order.
struct Node {
int32_t rank;       // position in the maintained topological order
bool visited;       // scratch flag used by the DFS passes
void* data;         // opaque user payload (unused by the algorithm itself)
OrderedNodeSet in;  // predecessors
OrderedNodeSet out; // successors
};
}
// Shared state of a GraphCycles instance, including scratch vectors reused
// across InsertEdge calls to avoid per-call allocation.
struct GraphCycles::Rep {
Vec<Node*> nodes;
Vec<int32_t> freeNodes;  // ids recycled by ContractEdge
Vec<int32_t> deltaf;     // scratch: nodes visited by the forward DFS
Vec<int32_t> deltab;     // scratch: nodes visited by the backward DFS
Vec<int32_t> list;       // scratch: nodes whose ranks are being reassigned
Vec<int32_t> merged;     // scratch: merged rank values
Vec<int32_t> stack;      // scratch: explicit DFS stack
};
// Creates a graph of `numNodes` isolated nodes. Node i is assigned initial
// rank i, which is trivially a valid topological order for an edgeless graph.
GraphCycles::GraphCycles(int32_t numNodes) : rep_(new Rep) {
  rep_->nodes.reserve(numNodes);
  for (int32_t id = 0; id < numNodes; ++id) {
    Node* node = new Node;
    node->visited = false;
    node->data = nullptr;
    // nodes.size() before this push_back is exactly `id`.
    node->rank = id;
    rep_->nodes.push_back(node);
  }
}
// Releases every node and then the shared representation itself.
GraphCycles::~GraphCycles() {
  for (Node* node : rep_->nodes) {
    delete node;
  }
  delete rep_;
}
// Returns true iff the directed edge x -> y is present.
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->nodes[x]->out.Contains(y);
}
// Removes the directed edge x -> y from both adjacency lists. Ranks are left
// untouched: removing an edge can never invalidate a topological order.
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->nodes[x]->out.Erase(y);
rep_->nodes[y]->in.Erase(x);
}
// Helpers for the incremental topological-order maintenance in InsertEdge.
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound);
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound);
static void reorder(GraphCycles::Rep* r);
static void sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst);
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes);
// Inserts the edge x -> y while keeping `rank` a valid topological order.
// Returns false (and leaves the graph unchanged) iff the edge would create a
// cycle; a self-edge is rejected immediately. Inserting an already-present
// edge is a no-op that returns true.
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
Node* nx = r->nodes[x];
if (!nx->out.Insert(y)) {
// Edge already existed.
return true;
}
Node* ny = r->nodes[y];
ny->in.Insert(x);
if (nx->rank <= ny->rank) {
// The current order already satisfies x before y; nothing to reorder.
return true;
}
// Search forward from y, bounded by x's rank; reaching x means a cycle.
if (forwardDfs(r, y, nx->rank)) {
nx->out.Erase(y);
ny->in.Erase(x);
// forwardDfs set visited flags on r->deltaf; undo before returning.
clearVisitedBits(r, r->deltaf);
return false;
}
// No cycle: collect affected predecessors of x and reassign ranks so the
// order becomes consistent with the new edge.
backwardDfs(r, x, ny->rank);
reorder(r);
return true;
}
// Iterative DFS over out-edges starting at n, visiting only nodes with rank
// below `upperBound`. Visited nodes are recorded in r->deltaf (with their
// visited flags left set). Returns true as soon as a node with rank equal to
// `upperBound` is reachable, i.e. the new edge would close a cycle.
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound) {
r->deltaf.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf.push_back(n);
for (auto w : nn->out.GetSequence()) {
Node* nw = r->nodes[w];
if (nw->rank == upperBound) {
// Note: earlier visited flags are intentionally left set; the caller
// clears them via clearVisitedBits(r, r->deltaf).
return true;
}
if (!nw->visited && nw->rank < upperBound) {
r->stack.push_back(w);
}
}
}
return false;
}
// Iterative DFS over in-edges starting at n, visiting only nodes with rank
// above `lowerBound`. Visited nodes are recorded in r->deltab; flags are
// cleared later by moveToList during reorder.
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound) {
r->deltab.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab.push_back(n);
for (auto w : nn->in.GetSequence()) {
Node* nw = r->nodes[w];
if (!nw->visited && lowerBound < nw->rank) {
r->stack.push_back(w);
}
}
}
}
// Reassigns ranks so the backward-reachable set (deltab) precedes the
// forward-reachable set (deltaf). Both sets are sorted by current rank, the
// node ids are concatenated in that order into `list`, the two rank
// sequences are merged into `merged`, and the i-th listed node receives the
// i-th merged rank — a permutation of the ranks the affected nodes already
// held, so all other nodes keep valid relative positions.
static void reorder(GraphCycles::Rep* r) {
sort(r->nodes, &r->deltab);
sort(r->nodes, &r->deltaf);
r->list.clear();
// moveToList also replaces each entry of deltab/deltaf with that node's
// rank and clears its visited flag.
moveToList(r, &r->deltab, &r->list);
moveToList(r, &r->deltaf, &r->list);
r->merged.resize(r->deltab.size() + r->deltaf.size());
std::merge(r->deltab.begin(), r->deltab.end(), r->deltaf.begin(),
r->deltaf.end(), r->merged.begin());
for (Vec<int32_t>::size_type i = 0, e = r->list.size(); i < e; ++i) {
r->nodes[r->list[i]]->rank = r->merged[i];
}
}
static void sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
return (*nodes)[a]->rank < (*nodes)[b]->rank;
}
};
ByRank cmp;
cmp.nodes = &nodes;
std::sort(delta->begin(), delta->end(), cmp);
}
// Appends the node ids in *src to *dst, and in the same pass rewrites each
// *src entry to that node's current rank (consumed later by reorder's merge)
// while clearing the node's visited flag.
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst) {
for (Vec<int32_t>::size_type i = 0, e = src->size(); i < e; i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes[w]->rank;
r->nodes[w]->visited = false;
dst->push_back(w);
}
}
// Clears the DFS visited flag on every node listed in `nodes`.
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes) {
  for (int32_t id : nodes) {
    r->nodes[id]->visited = false;
  }
}
// Returns true iff y is reachable from x along out-edges. Every node is
// reachable from itself. Because `rank` is a topological order, x can only
// reach y when rank(x) < rank(y), which also bounds the forward search.
bool GraphCycles::IsReachable(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = r->nodes[x];
Node* ny = r->nodes[y];
if (nx->rank >= ny->rank) {
return false;
}
// forwardDfs returns true when it reaches a node whose rank equals
// ny->rank, i.e. y itself.
bool reachable = forwardDfs(r, x, ny->rank);
// Undo the visited flags the search left behind.
clearVisitedBits(r, r->deltaf);
return reachable;
}
// Contracts the edge a -> b, merging b into a (or a into b, whichever keeps
// the node with the larger adjacency as the survivor). Returns the surviving
// node id, or an empty optional — leaving the graph unchanged — if removing
// the edge still leaves b reachable from a (contraction would create a
// cycle). The absorbed node's id is recycled via freeNodes.
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
assert(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachable(a, b)) {
// Another a ~> b path exists; restore the edge and refuse to contract.
InsertEdge(a, b);
return {};
}
// Keep the node with more edges so fewer edges need to be re-inserted.
if (rep_->nodes[b]->in.Size() + rep_->nodes[b]->out.Size() >
rep_->nodes[a]->in.Size() + rep_->nodes[a]->out.Size()) {
std::swap(a, b);
}
Node* nb = rep_->nodes[b];
OrderedNodeSet out = std::move(nb->out);
OrderedNodeSet in = std::move(nb->in);
// Detach b from all of its neighbors before re-attaching them to a.
for (int32_t y : out.GetSequence()) {
rep_->nodes[y]->in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->nodes[y]->out.Erase(b);
}
rep_->freeNodes.push_back(b);
rep_->nodes[a]->out.Reserve(rep_->nodes[a]->out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->nodes[a]->in.Reserve(rep_->nodes[a]->in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
// Returns a copy of `node`'s successor list in insertion order.
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
return rep_->nodes[node]->out.GetSequence();
}
namespace {
// Sorts node ids by descending rank; since `rank` is a topological order,
// descending rank is a post-order (successors before predecessors).
void sortInPostOrder(const Vec<Node*>& nodes, std::vector<int32_t>* toSort) {
std::sort(toSort->begin(), toSort->end(), [&](int32_t a, int32_t b) {
return nodes[a]->rank > nodes[b]->rank;
});
}
}
// Returns all live node ids (skipping ids recycled by ContractEdge) in
// post-order with respect to the current edge set.
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
llvm::DenseSet<int32_t> freeNodesSet;
for (int32_t n : rep_->freeNodes) freeNodesSet.insert(n);
std::vector<int32_t> allNodes;
allNodes.reserve(rep_->nodes.size() - freeNodesSet.size());
for (size_t i = 0, e = rep_->nodes.size(); i < e; i++) {
if (!freeNodesSet.count(i)) {
allNodes.push_back(i);
}
}
sortInPostOrder(rep_->nodes, &allNodes);
return allNodes;
}
} | #include "utils/cycle_detector.h"
#include "xla/test.h"
// Fixture providing a 100-node graph plus helpers to add edges and to build
// a known-acyclic "multiples" graph (x -> 2x and x -> 3x for x in [1, 25)).
class GraphCyclesTest : public ::testing::Test {
public:
GraphCyclesTest() : g_(100) {}
// Forwards to InsertEdge; returns false when the edge would create a cycle.
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
}
mlir::GraphCycles g_;
};
// The multiples graph is acyclic, so every insertion must succeed.
TEST_F(GraphCyclesTest, NoCycle) { AddMultiples(); }
// 4 -> 8 already exists via doubling, so 8 -> 4 closes a direct cycle.
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
}
// 2 -> 4 -> 16 -> 9 -> 2 would be a multi-hop cycle.
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
EXPECT_FALSE(AddEdge(9, 2));
}
TEST_F(GraphCyclesTest, RemoveEdge) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
g_.RemoveEdge(2, 3);
EXPECT_FALSE(g_.HasEdge(2, 3));
}
TEST_F(GraphCyclesTest, IsReachable) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
EXPECT_TRUE(g_.IsReachable(1, 5));
EXPECT_FALSE(g_.IsReachable(5, 1));
}
// Contracting 1-3 must fail (1 ~> 2 ~> 3 remains), then 1-2 and 2-3 merge
// into node 2 with the union of the original edges.
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(*g_.ContractEdge(1, 2), 2);
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(*g_.ContractEdge(2, 3), 2);
EXPECT_TRUE(g_.HasEdge(2, 4));
} | bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
// NOTE(review): this is a duplicated copy of GraphCycles::InsertEdge (its
// signature is fused onto the previous line by the dataset extraction).
// Rejects self-edges, treats re-insertion as success, and only reorders
// ranks when the new edge violates the current topological order.
if (x == y) return false;
Rep* r = rep_;
Node* nx = r->nodes[x];
if (!nx->out.Insert(y)) {
return true;
}
Node* ny = r->nodes[y];
ny->in.Insert(x);
if (nx->rank <= ny->rank) {
return true;
}
// Reaching x from y means the edge closes a cycle: undo and report failure.
if (forwardDfs(r, y, nx->rank)) {
nx->out.Erase(y);
ny->in.Erase(x);
clearVisitedBits(r, r->deltaf);
return false;
}
backwardDfs(r, x, ny->rank);
reorder(r);
return true;
} | TEST_F(GraphCyclesTest, NoCycle) { AddMultiples(); }
// NOTE(review): duplicated copies of the GraphCyclesTest cases above
// (dataset "Unit Test" column); content matches the originals.
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
EXPECT_FALSE(AddEdge(9, 2));
}
TEST_F(GraphCyclesTest, RemoveEdge) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
g_.RemoveEdge(2, 3);
EXPECT_FALSE(g_.HasEdge(2, 3));
}
TEST_F(GraphCyclesTest, IsReachable) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
EXPECT_TRUE(g_.IsReachable(1, 5));
EXPECT_FALSE(g_.IsReachable(5, 1));
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(*g_.ContractEdge(1, 2), 2);
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(*g_.ContractEdge(2, 3), 2);
EXPECT_TRUE(g_.HasEdge(2, 4));
}
#include "tensorflow/cc/experimental/libtf/mlir/mlir_transform.h"
#include <string>
#include <utility>
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/cc/saved_model/bundle_v2.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
namespace tf {
namespace libtf {
// Loads a SavedModel V2 bundle from `saved_model`, converts it to an MLIR
// module using the MLIRContext stashed in self["_context"], and returns an
// Object exposing the module capsule ("_module") plus a "ToString" callable.
// Returns None() on any load/conversion failure.
Handle LoadModule(Object self, String saved_model) {
tensorflow::SavedModelV2Bundle bundle;
tensorflow::Status status =
tensorflow::SavedModelV2Bundle::Load(saved_model.get(), &bundle);
if (!status.ok()) {
return None();
}
auto* context = self.Get<internal::Capsule>(String("_context"))
->cast<mlir::MLIRContext*>();
// Empty span: export everything (no name filtering).
absl::Span<std::string> exported_names(nullptr, 0);
auto module_or =
tensorflow::ConvertSavedModelToMlir(&bundle, context, exported_names);
if (!module_or.status().ok()) {
return None();
}
Object obj;
// NOTE(review): ownership of the heap-allocated OwningOpRef is presumably
// taken over by the Capsule — confirm it frees the pointer on destruction.
obj.Set(
String("_module"),
Handle(impl::TaggedValue::Capsule(new mlir::OwningOpRef<mlir::ModuleOp>(
std::move(module_or).value()))));
// "ToString" prints the wrapped module; `false` disables debug info.
auto get_string = [](Object self) {
auto ref = self.Get<internal::Capsule>(String("_module"))
->cast<mlir::OwningOpRef<mlir::ModuleOp>*>();
return String(tensorflow::MlirModuleToString(ref->get(), false).c_str());
};
obj.Set(String("ToString"), Callable(TFLIB_CALLABLE_ADAPTOR(get_string)));
return obj;
}
// Unimplemented stub: saving is not supported yet; always returns None().
None SaveModule(Object self, Object module, String directory) {
return None();
}
// Unimplemented stub: pass pipelines are not supported yet; returns None().
None Transform(Object self, Object module, List passes) {
return None();
}
// Builds the public MLIR namespace object: LoadSavedModel / SaveSavedModel
// callables plus a private "_context" capsule holding the MLIRContext that
// LoadModule uses. (Transform exists above but is not registered here —
// presumably intentional while it is a stub.)
Object MLIR() {
Object obj;
obj.Set(String("LoadSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(LoadModule)));
obj.Set(String("SaveSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(SaveModule)));
obj.Set(String("_context"),
Handle(impl::TaggedValue::Capsule(new mlir::MLIRContext())));
return obj;
}
}
} | #include "tensorflow/cc/experimental/libtf/mlir/mlir_transform.h"
#include <iostream>
#include <string>
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
// LoadSavedModel returns None for a nonexistent path, and for a valid model
// returns an object whose ToString yields a non-empty MLIR dump.
TEST(TransformTest, LoadSavedModel) {
Object mlir = MLIR();
TF_ASSERT_OK_AND_ASSIGN(Callable load,
mlir.Get<Callable>(String("LoadSavedModel")));
TF_ASSERT_OK_AND_ASSIGN(
Handle model_bad,
load.Call<Handle>(mlir, String("/error/doesnotexist___31284382")));
TF_ASSERT_OK(Cast<None>(model_bad).status());
const std::string model_good_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/cc/experimental/libtf/tests/testdata/simple-model");
TF_ASSERT_OK_AND_ASSIGN(
Object model_good,
load.Call<Object>(mlir, String(model_good_path.c_str())));
TF_ASSERT_OK_AND_ASSIGN(Callable to_string,
model_good.Get<Callable>(String("ToString")));
TF_ASSERT_OK_AND_ASSIGN(String s, to_string.Call<String>(model_good));
ASSERT_GT(strlen(s.get()), 0);
}
}
} | Object MLIR() {
// NOTE(review): duplicated copy of MLIR() above (dataset "Code" column);
// the "} |" prefix is an extraction artifact.
Object obj;
obj.Set(String("LoadSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(LoadModule)));
obj.Set(String("SaveSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(SaveModule)));
obj.Set(String("_context"),
Handle(impl::TaggedValue::Capsule(new mlir::MLIRContext())));
return obj;
} | #include "tensorflow/cc/experimental/libtf/mlir/mlir_transform.h"
#include <iostream>
#include <string>
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
// NOTE(review): duplicated copy of TransformTest.LoadSavedModel above
// (dataset "Unit Test" column); content matches the original.
TEST(TransformTest, LoadSavedModel) {
Object mlir = MLIR();
TF_ASSERT_OK_AND_ASSIGN(Callable load,
mlir.Get<Callable>(String("LoadSavedModel")));
TF_ASSERT_OK_AND_ASSIGN(
Handle model_bad,
load.Call<Handle>(mlir, String("/error/doesnotexist___31284382")));
TF_ASSERT_OK(Cast<None>(model_bad).status());
const std::string model_good_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/cc/experimental/libtf/tests/testdata/simple-model");
TF_ASSERT_OK_AND_ASSIGN(
Object model_good,
load.Call<Object>(mlir, String(model_good_path.c_str())));
TF_ASSERT_OK_AND_ASSIGN(Callable to_string,
model_good.Get<Callable>(String("ToString")));
TF_ASSERT_OK_AND_ASSIGN(String s, to_string.Call<String>(model_good));
ASSERT_GT(strlen(s.get()), 0);
}
#ifndef AROLLA_IO_INPLACE_LOADER_H_
#define AROLLA_IO_INPLACE_LOADER_H_
#include <cstddef>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
// Collects named field offsets inside a standard-layout Struct so the fields
// can be exposed as TypedSlots relative to wherever the struct lives in a
// FrameLayout. Register fields via AROLLA_ADD_INPLACE_SLOT_FIELD, then call
// OutputSlots with the struct's slot to get absolute-offset TypedSlots.
template <class Struct>
class InplaceSlotBuilder final {
static_assert(
std::is_standard_layout<Struct>::value,
"Data must be standard layout to be used with InplaceSlotBuilder.");
public:
using value_type = Struct;
// Returns name -> TypedSlot with each registered field's zero-based offset
// shifted by the struct's own byte offset within the frame.
absl::flat_hash_map<std::string, TypedSlot> OutputSlots(
FrameLayout::Slot<Struct> slot) const {
absl::flat_hash_map<std::string, TypedSlot> slots;
slots.reserve(zero_based_slots_.size());
for (const auto& name_slot : zero_based_slots_) {
slots.insert({name_slot.first,
TypedSlot::UnsafeFromOffset(
name_slot.second.GetType(),
name_slot.second.byte_offset() + slot.byte_offset())});
}
return slots;
}
// Registers a field at `field_offset` (relative to the struct start) under
// `name`. Fails if the name was already registered. "Unsafe": the caller
// is responsible for offset/type correctness (use the macro below).
absl::Status AddUnsafeField(const std::string& name, QTypePtr type,
size_t field_offset) {
if (!zero_based_slots_
.insert({name, TypedSlot::UnsafeFromOffset(type, field_offset)})
.second) {
return absl::FailedPreconditionError(absl::StrCat(
"InplaceLoaderBuilder: duplicated slot name: '", name, "'"));
}
return absl::OkStatus();
}
private:
// Field slots keyed by name, with offsets relative to the struct start.
absl::flat_hash_map<std::string, TypedSlot> zero_based_slots_;
};
// Registers `field` of the builder's value_type under `name`, deriving the
// QType from the field's declared type and the offset via offsetof (valid
// because value_type is enforced to be standard layout).
#define AROLLA_ADD_INPLACE_SLOT_FIELD(builder, field, name) \
builder.AddUnsafeField( \
name, \
::arolla::GetQType< \
decltype(std::declval<decltype(builder)::value_type>().field)>(), \
offsetof(decltype(builder)::value_type, field))
}
#endif | #include "arolla/io/inplace_slot_builder.h"
#include <cstddef>
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Simple standard-layout fixture type for the builder tests.
struct TestStruct {
int a;
double b;
};
// OutputSlots shifts each registered field offset by the struct slot's
// byte offset and preserves the field QTypes.
TEST(ImplaceSlotTest, InplaceSlotBuilder) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<double>();
auto struct_slot = layout_builder.AddSlot<TestStruct>();
InplaceSlotBuilder<TestStruct> builder;
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b, "name_for_b"));
EXPECT_THAT(
builder.OutputSlots(struct_slot),
UnorderedElementsAre(
Pair("a",
TypedSlot::UnsafeFromOffset(
i32, offsetof(TestStruct, a) + struct_slot.byte_offset())),
Pair("name_for_b",
TypedSlot::UnsafeFromOffset(
f64, offsetof(TestStruct, b) + struct_slot.byte_offset()))));
}
// Registering the same slot name twice must fail with FailedPrecondition.
TEST(InputLoaderTest, InplaceLoaderBuilderErrors) {
FrameLayout::Builder layout_builder;
InplaceSlotBuilder<TestStruct> builder;
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
ASSERT_THAT(
AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b, "a"),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("InplaceLoaderBuilder: duplicated slot name: 'a'")));
}
// Nested standard-layout struct to exercise dotted field paths.
struct TestStructWithStruct {
int a;
TestStruct b;
};
// Nested fields (b.a, b.b) are addressable through the macro via offsetof.
TEST(InputLoaderTest, InplaceLoaderBuilderNestedStruct) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<double>();
auto struct_slot = layout_builder.AddSlot<TestStructWithStruct>();
InplaceSlotBuilder<TestStructWithStruct> builder;
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.a, "b.a"));
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.b, "b.b"));
EXPECT_THAT(builder.OutputSlots(struct_slot),
UnorderedElementsAre(
Pair("a", TypedSlot::UnsafeFromOffset(
i32, offsetof(TestStructWithStruct, a) +
struct_slot.byte_offset())),
Pair("b.a", TypedSlot::UnsafeFromOffset(
i32, offsetof(TestStructWithStruct, b.a) +
struct_slot.byte_offset())),
Pair("b.b", TypedSlot::UnsafeFromOffset(
f64, offsetof(TestStructWithStruct, b.b) +
struct_slot.byte_offset()))));
}
// The produced slots alias the struct's own memory: reads through the slots
// observe writes through the struct pointer and vice versa.
TEST(StructReadOperatorTest, Reader) {
InplaceSlotBuilder<TestStructWithStruct> builder;
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.b, "b.b"));
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<double>();
auto struct_slot = layout_builder.AddSlot<TestStructWithStruct>();
auto slots_map = builder.OutputSlots(struct_slot);
ASSERT_OK(RegisterUnsafeSlotsMap(slots_map, &layout_builder));
ASSERT_OK_AND_ASSIGN(auto slot_a, slots_map.at("a").ToSlot<int>());
ASSERT_OK_AND_ASSIGN(auto slot_b, slots_map.at("b.b").ToSlot<double>());
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
alloc.frame().Set(struct_slot, {3, {0, 5.5}});
EXPECT_EQ(alloc.frame().Get(slot_a), 3);
EXPECT_EQ(alloc.frame().Get(slot_b), 5.5);
TestStructWithStruct* input = alloc.frame().GetMutable(struct_slot);
input->a = 4;
input->b.b = 6.5;
EXPECT_EQ(alloc.frame().Get(slot_a), 4);
EXPECT_EQ(alloc.frame().Get(slot_b), 6.5);
// The slot pointers are literally the field addresses.
EXPECT_EQ(alloc.frame().GetMutable(slot_a), &input->a);
EXPECT_EQ(alloc.frame().GetMutable(slot_b), &input->b.b);
}
}
}
namespace other_namespace {
namespace {
// Struct declared outside the arolla namespace, to check that the slot macro
// does not depend on being expanded inside ::arolla.
struct TestStruct {
  int a;
  double b;
};
// Compilation test: AROLLA_ADD_INPLACE_SLOT_FIELD must work when invoked from
// a namespace other than ::arolla (builder type fully qualified).
TEST(InputLoaderTest, InplaceLoaderMacroCompilesInOtherNamespace) {
  ::arolla::InplaceSlotBuilder<TestStruct> builder;
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
}
}
} | absl::flat_hash_map<std::string, TypedSlot> OutputSlots(
FrameLayout::Slot<Struct> slot) const {
absl::flat_hash_map<std::string, TypedSlot> slots;
slots.reserve(zero_based_slots_.size());
for (const auto& name_slot : zero_based_slots_) {
slots.insert({name_slot.first,
TypedSlot::UnsafeFromOffset(
name_slot.second.GetType(),
name_slot.second.byte_offset() + slot.byte_offset())});
}
return slots;
} | TEST(ImplaceSlotTest, InplaceSlotBuilder) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<double>();
auto struct_slot = layout_builder.AddSlot<TestStruct>();
InplaceSlotBuilder<TestStruct> builder;
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b, "name_for_b"));
EXPECT_THAT(
builder.OutputSlots(struct_slot),
UnorderedElementsAre(
Pair("a",
TypedSlot::UnsafeFromOffset(
i32, offsetof(TestStruct, a) + struct_slot.byte_offset())),
Pair("name_for_b",
TypedSlot::UnsafeFromOffset(
f64, offsetof(TestStruct, b) + struct_slot.byte_offset()))));
}
// Nested-struct coverage: slots registered via dotted paths ("b.a", "b.b")
// must report offsets computed as offsetof(member) + struct slot offset.
TEST(InputLoaderTest, InplaceLoaderBuilderNestedStruct) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  FrameLayout::Builder layout_builder;
  layout_builder.AddSlot<double>();  // shifts struct_slot away from offset 0
  auto struct_slot = layout_builder.AddSlot<TestStructWithStruct>();
  InplaceSlotBuilder<TestStructWithStruct> builder;
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.a, "b.a"));
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.b, "b.b"));
  EXPECT_THAT(builder.OutputSlots(struct_slot),
              UnorderedElementsAre(
                  Pair("a", TypedSlot::UnsafeFromOffset(
                                i32, offsetof(TestStructWithStruct, a) +
                                         struct_slot.byte_offset())),
                  Pair("b.a", TypedSlot::UnsafeFromOffset(
                                  i32, offsetof(TestStructWithStruct, b.a) +
                                           struct_slot.byte_offset())),
                  Pair("b.b", TypedSlot::UnsafeFromOffset(
                                  f64, offsetof(TestStructWithStruct, b.b) +
                                           struct_slot.byte_offset()))));
}
// Field slots created in place must read and write the struct's own members:
// values set through the struct are seen through the slots, and slot pointers
// equal the member addresses.
TEST(StructReadOperatorTest, Reader) {
  InplaceSlotBuilder<TestStructWithStruct> builder;
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, a, "a"));
  ASSERT_OK(AROLLA_ADD_INPLACE_SLOT_FIELD(builder, b.b, "b.b"));
  FrameLayout::Builder layout_builder;
  layout_builder.AddSlot<double>();
  auto struct_slot = layout_builder.AddSlot<TestStructWithStruct>();
  auto slots_map = builder.OutputSlots(struct_slot);
  ASSERT_OK(RegisterUnsafeSlotsMap(slots_map, &layout_builder));
  ASSERT_OK_AND_ASSIGN(auto slot_a, slots_map.at("a").ToSlot<int>());
  ASSERT_OK_AND_ASSIGN(auto slot_b, slots_map.at("b.b").ToSlot<double>());
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  alloc.frame().Set(struct_slot, {3, {0, 5.5}});
  EXPECT_EQ(alloc.frame().Get(slot_a), 3);
  EXPECT_EQ(alloc.frame().Get(slot_b), 5.5);
  // Mutate via the struct pointer; slots must observe the new values.
  TestStructWithStruct* input = alloc.frame().GetMutable(struct_slot);
  input->a = 4;
  input->b.b = 6.5;
  EXPECT_EQ(alloc.frame().Get(slot_a), 4);
  EXPECT_EQ(alloc.frame().Get(slot_b), 6.5);
  EXPECT_EQ(alloc.frame().GetMutable(slot_a), &input->a);
  EXPECT_EQ(alloc.frame().GetMutable(slot_b), &input->b.b);
}
#include "extensions/protobuf/internal/struct.h"
#include <string>
#include <utility>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "common/json.h"
#include "extensions/protobuf/internal/map_reflection.h"
#include "extensions/protobuf/internal/struct_lite.h"
#include "internal/status_macros.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
#include "google/protobuf/reflection.h"
namespace cel::extensions::protobuf_internal {
namespace {
// Returns `message`'s descriptor, or an internal error when the message
// exposes none.
absl::StatusOr<absl::Nonnull<const google::protobuf::Descriptor*>> GetDescriptor(
    const google::protobuf::Message& message) {
  if (const auto* desc = message.GetDescriptor();
      ABSL_PREDICT_TRUE(desc != nullptr)) {
    return desc;
  }
  return absl::InternalError(
      absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
// Returns `message`'s reflection interface, or an internal error when the
// message exposes none.
absl::StatusOr<absl::Nonnull<const google::protobuf::Reflection*>> GetReflection(
    const google::protobuf::Message& message) {
  if (const auto* refl = message.GetReflection();
      ABSL_PREDICT_TRUE(refl != nullptr)) {
    return refl;
  }
  return absl::InternalError(
      absl::StrCat(message.GetTypeName(), " missing reflection"));
}
// Looks up the field with the given number on `descriptor`; a miss indicates
// a schema mismatch and is reported as an internal error.
absl::StatusOr<absl::Nonnull<const google::protobuf::FieldDescriptor*>> FindFieldByNumber(
    absl::Nonnull<const google::protobuf::Descriptor*> descriptor, int number) {
  if (const auto* field = descriptor->FindFieldByNumber(number);
      ABSL_PREDICT_TRUE(field != nullptr)) {
    return field;
  }
  return absl::InternalError(
      absl::StrCat(descriptor->full_name(),
                   " missing descriptor for field number: ", number));
}
// Looks up the oneof named `name` on `descriptor`; a miss indicates a schema
// mismatch and is reported as an internal error.
absl::StatusOr<absl::Nonnull<const google::protobuf::OneofDescriptor*>> FindOneofByName(
    absl::Nonnull<const google::protobuf::Descriptor*> descriptor,
    absl::string_view name) {
  if (const auto* oneof = descriptor->FindOneofByName(name);
      ABSL_PREDICT_TRUE(oneof != nullptr)) {
    return oneof;
  }
  return absl::InternalError(absl::StrCat(
      descriptor->full_name(), " missing descriptor for oneof: ", name));
}
// Fails with an internal error unless `field` has the expected C++ type.
absl::Status CheckFieldType(absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
                            google::protobuf::FieldDescriptor::CppType type) {
  if (ABSL_PREDICT_TRUE(field->cpp_type() == type)) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(), " has unexpected type: ", field->cpp_type_name()));
}
// Fails with an internal error unless `field` is singular (neither repeated
// nor a map).
absl::Status CheckFieldSingular(
    absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
  if (ABSL_PREDICT_TRUE(!field->is_repeated() && !field->is_map())) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(), " has unexpected cardinality: REPEATED"));
}
// Fails with an internal error unless `field` is repeated or a map.
absl::Status CheckFieldRepeated(
    absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
  if (ABSL_PREDICT_TRUE(field->is_repeated() || field->is_map())) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(), " has unexpected cardinality: SINGULAR"));
}
// Fails with an internal error unless `field` is a map field. The message
// distinguishes a plain repeated field (wrong type) from a singular one
// (wrong cardinality).
absl::Status CheckFieldMap(
    absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
  if (ABSL_PREDICT_TRUE(field->is_map())) {
    return absl::OkStatus();
  }
  if (field->is_repeated()) {
    return absl::InternalError(
        absl::StrCat(field->full_name(),
                     " has unexpected type: ", field->cpp_type_name()));
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(), " has unexpected cardinality: SINGULAR"));
}
// Fails with an internal error unless `field` is an enum field whose enum
// type's full name equals `name`.
absl::Status CheckFieldEnumType(
    absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
    absl::string_view name) {
  CEL_RETURN_IF_ERROR(
      CheckFieldType(field, google::protobuf::FieldDescriptor::CPPTYPE_ENUM));
  if (ABSL_PREDICT_TRUE(field->enum_type()->full_name() == name)) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(),
      " has unexpected type: ", field->enum_type()->full_name()));
}
// Fails with an internal error unless `field` is a message field whose
// message type's full name equals `name`.
absl::Status CheckFieldMessageType(
    absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
    absl::string_view name) {
  CEL_RETURN_IF_ERROR(
      CheckFieldType(field, google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE));
  if (ABSL_PREDICT_TRUE(field->message_type()->full_name() == name)) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      field->full_name(),
      " has unexpected type: ", field->message_type()->full_name()));
}
}
// Converts a google.protobuf.Value message to Json. Fast path: when the
// message uses the generated descriptor, down-cast and delegate to the
// generated-message conversion; otherwise walk the message via reflection,
// validating field types/cardinality so descriptor-pool mismatches surface
// as internal errors rather than bad output.
absl::StatusOr<Json> DynamicValueProtoToJson(const google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Value");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::Value::descriptor())) {
    return GeneratedValueProtoToJson(
        google::protobuf::DownCastToGenerated<google::protobuf::Value>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  CEL_ASSIGN_OR_RETURN(const auto* kind_desc, FindOneofByName(desc, "kind"));
  // The currently-set member of the `kind` oneof; an unset oneof maps to
  // JSON null.
  const auto* value_desc =
      reflection->GetOneofFieldDescriptor(message, kind_desc);
  if (value_desc == nullptr) {
    return kJsonNull;
  }
  // Dispatch on which oneof member is set.
  switch (value_desc->number()) {
    case google::protobuf::Value::kNullValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldEnumType(value_desc, "google.protobuf.NullValue"));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      return kJsonNull;
    case google::protobuf::Value::kNumberValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      return reflection->GetDouble(message, value_desc);
    case google::protobuf::Value::kStringValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_STRING));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      return reflection->GetCord(message, value_desc);
    case google::protobuf::Value::kBoolValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_BOOL));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      return reflection->GetBool(message, value_desc);
    case google::protobuf::Value::kStructValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldMessageType(value_desc, "google.protobuf.Struct"));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      // Recurse into the nested Struct.
      return DynamicStructProtoToJson(
          reflection->GetMessage(message, value_desc));
    case google::protobuf::Value::kListValueFieldNumber:
      CEL_RETURN_IF_ERROR(
          CheckFieldMessageType(value_desc, "google.protobuf.ListValue"));
      CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
      // Recurse into the nested ListValue.
      return DynamicListValueProtoToJson(
          reflection->GetMessage(message, value_desc));
    default:
      return absl::InternalError(
          absl::StrCat(value_desc->full_name(),
                       " has unexpected number: ", value_desc->number()));
  }
}
// Converts a google.protobuf.ListValue message to a Json array. Uses the
// generated-message fast path when possible, otherwise iterates the repeated
// `values` field via reflection and converts each element recursively.
absl::StatusOr<Json> DynamicListValueProtoToJson(
    const google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.ListValue");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::ListValue::descriptor())) {
    return GeneratedListValueProtoToJson(
        google::protobuf::DownCastToGenerated<google::protobuf::ListValue>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  CEL_ASSIGN_OR_RETURN(
      const auto* values_field,
      FindFieldByNumber(desc, google::protobuf::ListValue::kValuesFieldNumber));
  // `values` must be `repeated google.protobuf.Value`.
  CEL_RETURN_IF_ERROR(
      CheckFieldMessageType(values_field, "google.protobuf.Value"));
  CEL_RETURN_IF_ERROR(CheckFieldRepeated(values_field));
  const auto& repeated_field_ref =
      reflection->GetRepeatedFieldRef<google::protobuf::Message>(message, values_field);
  JsonArrayBuilder builder;
  builder.reserve(repeated_field_ref.size());
  for (const auto& element : repeated_field_ref) {
    CEL_ASSIGN_OR_RETURN(auto value, DynamicValueProtoToJson(element));
    builder.push_back(std::move(value));
  }
  return std::move(builder).Build();
}
// Converts a google.protobuf.Struct message to a Json object. Uses the
// generated-message fast path when possible, otherwise iterates the
// `fields` map (string -> google.protobuf.Value) via map reflection.
absl::StatusOr<Json> DynamicStructProtoToJson(const google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Struct");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::Struct::descriptor())) {
    return GeneratedStructProtoToJson(
        google::protobuf::DownCastToGenerated<google::protobuf::Struct>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  CEL_ASSIGN_OR_RETURN(
      const auto* fields_field,
      FindFieldByNumber(desc, google::protobuf::Struct::kFieldsFieldNumber));
  // `fields` must be map<string, google.protobuf.Value>.
  CEL_RETURN_IF_ERROR(CheckFieldMap(fields_field));
  CEL_RETURN_IF_ERROR(CheckFieldType(fields_field->message_type()->map_key(),
                                     google::protobuf::FieldDescriptor::CPPTYPE_STRING));
  CEL_RETURN_IF_ERROR(CheckFieldMessageType(
      fields_field->message_type()->map_value(), "google.protobuf.Value"));
  auto map_begin =
      protobuf_internal::MapBegin(*reflection, message, *fields_field);
  auto map_end = protobuf_internal::MapEnd(*reflection, message, *fields_field);
  JsonObjectBuilder builder;
  builder.reserve(
      protobuf_internal::MapSize(*reflection, message, *fields_field));
  for (; map_begin != map_end; ++map_begin) {
    // Each map value is itself a Value message; convert recursively.
    CEL_ASSIGN_OR_RETURN(
        auto value,
        DynamicValueProtoToJson(map_begin.GetValueRef().GetMessageValue()));
    builder.insert_or_assign(absl::Cord(map_begin.GetKey().GetStringValue()),
                             std::move(value));
  }
  return std::move(builder).Build();
}
// Populates a google.protobuf.Value message from Json. Uses the generated
// fast path when the descriptors match; otherwise dispatches on the Json
// variant alternative and sets the corresponding `kind` oneof member via
// reflection, validating each field's declared type first.
absl::Status DynamicValueProtoFromJson(const Json& json,
                                       google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Value");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::Value::descriptor())) {
    return GeneratedValueProtoFromJson(
        json, google::protobuf::DownCastToGenerated<google::protobuf::Value>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  // One handler per Json alternative; absl::visit selects the active one.
  return absl::visit(
      absl::Overload(
          [&message, &desc, &reflection](JsonNull) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* null_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kNullValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldEnumType(
                null_value_field, "google.protobuf.NullValue"));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(null_value_field));
            // NULL_VALUE is enum value 0.
            reflection->SetEnumValue(&message, null_value_field, 0);
            return absl::OkStatus();
          },
          [&message, &desc, &reflection](JsonBool value) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* bool_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kBoolValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldType(
                bool_value_field, google::protobuf::FieldDescriptor::CPPTYPE_BOOL));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(bool_value_field));
            reflection->SetBool(&message, bool_value_field, value);
            return absl::OkStatus();
          },
          [&message, &desc, &reflection](JsonNumber value) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* number_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kNumberValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldType(
                number_value_field, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(number_value_field));
            reflection->SetDouble(&message, number_value_field, value);
            return absl::OkStatus();
          },
          [&message, &desc,
           &reflection](const JsonString& value) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* string_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kStringValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldType(
                string_value_field, google::protobuf::FieldDescriptor::CPPTYPE_STRING));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(string_value_field));
            reflection->SetString(&message, string_value_field,
                                  static_cast<std::string>(value));
            return absl::OkStatus();
          },
          [&message, &desc,
           &reflection](const JsonArray& value) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* list_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kListValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldMessageType(
                list_value_field, "google.protobuf.ListValue"));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(list_value_field));
            // Recurse into the nested ListValue message.
            return DynamicListValueProtoFromJson(
                value, *reflection->MutableMessage(&message, list_value_field));
          },
          [&message, &desc,
           &reflection](const JsonObject& value) -> absl::Status {
            CEL_ASSIGN_OR_RETURN(
                const auto* struct_value_field,
                FindFieldByNumber(
                    desc, google::protobuf::Value::kStructValueFieldNumber));
            CEL_RETURN_IF_ERROR(CheckFieldMessageType(
                struct_value_field, "google.protobuf.Struct"));
            CEL_RETURN_IF_ERROR(CheckFieldSingular(struct_value_field));
            // Recurse into the nested Struct message.
            return DynamicStructProtoFromJson(
                value,
                *reflection->MutableMessage(&message, struct_value_field));
          }),
      json);
}
// Populates a google.protobuf.ListValue message from a Json array. Uses the
// generated fast path when possible; otherwise clears the repeated `values`
// field and appends one converted Value message per array element.
absl::Status DynamicListValueProtoFromJson(const JsonArray& json,
                                           google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.ListValue");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::ListValue::descriptor())) {
    return GeneratedListValueProtoFromJson(
        json,
        google::protobuf::DownCastToGenerated<google::protobuf::ListValue>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  CEL_ASSIGN_OR_RETURN(
      const auto* values_field,
      FindFieldByNumber(desc, google::protobuf::ListValue::kValuesFieldNumber));
  CEL_RETURN_IF_ERROR(
      CheckFieldMessageType(values_field, "google.protobuf.Value"));
  CEL_RETURN_IF_ERROR(CheckFieldRepeated(values_field));
  auto repeated_field_ref =
      reflection->GetMutableRepeatedFieldRef<google::protobuf::Message>(&message,
                                                              values_field);
  // Replace any existing contents.
  repeated_field_ref.Clear();
  for (const auto& element : json) {
    // RepeatedFieldRef::Add copies, so build each element in a scratch
    // message owned by this loop iteration.
    auto scratch = absl::WrapUnique(repeated_field_ref.NewMessage());
    CEL_RETURN_IF_ERROR(DynamicValueProtoFromJson(element, *scratch));
    repeated_field_ref.Add(*scratch);
  }
  return absl::OkStatus();
}
// Populates a google.protobuf.Struct message from a Json object. Uses the
// generated fast path when possible; otherwise inserts each entry into the
// `fields` map via map reflection and converts the value recursively.
absl::Status DynamicStructProtoFromJson(const JsonObject& json,
                                        google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Struct");
  CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::Struct::descriptor())) {
    return GeneratedStructProtoFromJson(
        json, google::protobuf::DownCastToGenerated<google::protobuf::Struct>(message));
  }
  CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
  CEL_ASSIGN_OR_RETURN(
      const auto* fields_field,
      FindFieldByNumber(desc, google::protobuf::Struct::kFieldsFieldNumber));
  // `fields` must be map<string, google.protobuf.Value>.
  CEL_RETURN_IF_ERROR(CheckFieldMap(fields_field));
  CEL_RETURN_IF_ERROR(CheckFieldType(fields_field->message_type()->map_key(),
                                     google::protobuf::FieldDescriptor::CPPTYPE_STRING));
  CEL_RETURN_IF_ERROR(CheckFieldMessageType(
      fields_field->message_type()->map_value(), "google.protobuf.Value"));
  for (const auto& entry : json) {
    google::protobuf::MapKey map_key;
    map_key.SetStringValue(static_cast<std::string>(entry.first));
    // Inserts a default entry (or finds the existing one), then writes the
    // converted Value into it in place.
    google::protobuf::MapValueRef map_value;
    protobuf_internal::InsertOrLookupMapValue(
        *reflection, &message, *fields_field, map_key, &map_value);
    CEL_RETURN_IF_ERROR(DynamicValueProtoFromJson(
        entry.second, *map_value.MutableMessageValue()));
  }
  return absl::OkStatus();
}
} | #include "extensions/protobuf/internal/struct.h"
#include <memory>
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "common/json.h"
#include "extensions/protobuf/internal/struct_lite.h"
#include "internal/testing.h"
#include "testutil/util.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
#include "google/protobuf/text_format.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::google::api::expr::testutil::EqualsProto;
using testing::IsEmpty;
using testing::VariantWith;
using cel::internal::IsOkAndHolds;
// Parses `text` as a text-format proto of generated type T, aborting the
// process on parse failure.
template <typename T>
T ParseTextOrDie(absl::string_view text) {
  T proto;
  ABSL_CHECK(google::protobuf::TextFormat::ParseFromString(text, &proto));
  return proto;
}
// Dynamic-message overload: creates a new message of the same type as
// `prototype` and parses `text` into it, aborting on parse failure.
std::unique_ptr<google::protobuf::Message> ParseTextOrDie(
    absl::string_view text, const google::protobuf::Message& prototype) {
  auto message = absl::WrapUnique(prototype.New());
  ABSL_CHECK(google::protobuf::TextFormat::ParseFromString(text, message.get()));
  return message;
}
// Round-trips every google.protobuf.Value kind (null, bool, number, string,
// list, struct) through the generated-message conversions in both directions.
TEST(Value, Generated) {
  google::protobuf::Value proto;
  // An unset Value converts to JSON null.
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
  proto.set_null_value(google::protobuf::NULL_VALUE);
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(null_value: 0)pb")));
  proto.set_bool_value(true);
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonBool>(true)));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(true), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(bool_value: true)pb")));
  proto.set_number_value(1.0);
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonNumber>(1.0)));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(1.0), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(number_value: 1.0)pb")));
  proto.set_string_value("foo");
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonString>(JsonString("foo"))));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonString("foo")), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(string_value: "foo")pb")));
  // Empty list and struct values convert to empty JSON array/object.
  proto.mutable_list_value();
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonArray>(IsEmpty())));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonArray()), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(list_value: {})pb")));
  proto.mutable_struct_value();
  EXPECT_THAT(GeneratedValueProtoToJson(proto),
              IsOkAndHolds(VariantWith<JsonObject>(IsEmpty())));
  proto.Clear();
  EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonObject()), proto));
  EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
                         R"pb(struct_value: {})pb")));
}
// Same round-trip coverage as Value.Generated, but through a dynamic message
// built from a separate descriptor pool, so the reflection-based code paths
// (not the generated fast paths) are exercised.
TEST(Value, Dynamic) {
  google::protobuf::SimpleDescriptorDatabase database;
  {
    google::protobuf::FileDescriptorProto fd;
    google::protobuf::Value::descriptor()->file()->CopyTo(&fd);
    ASSERT_TRUE(database.Add(fd));
  }
  google::protobuf::DescriptorPool pool(&database);
  pool.AllowUnknownDependencies();
  google::protobuf::DynamicMessageFactory factory(&pool);
  // Forces dynamic messages even for types with generated classes.
  factory.SetDelegateToGeneratedFactory(false);
  std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
      factory.GetPrototype(pool.FindMessageTypeByName("google.protobuf.Value"))
          ->New());
  const auto* reflection = proto->GetReflection();
  const auto* descriptor = proto->GetDescriptor();
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
  reflection->SetEnumValue(proto.get(),
                           descriptor->FindFieldByName("null_value"), 0);
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
  proto->Clear();
  EXPECT_OK(DynamicValueProtoFromJson(Json(), *proto));
  EXPECT_THAT(*proto,
              EqualsProto(*ParseTextOrDie(R"pb(null_value: 0)pb", *proto)));
  reflection->SetBool(proto.get(), descriptor->FindFieldByName("bool_value"),
                      true);
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonBool>(true)));
  proto->Clear();
  EXPECT_OK(DynamicValueProtoFromJson(Json(true), *proto));
  EXPECT_THAT(*proto,
              EqualsProto(*ParseTextOrDie(R"pb(bool_value: true)pb", *proto)));
  reflection->SetDouble(proto.get(),
                        descriptor->FindFieldByName("number_value"), 1.0);
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonNumber>(1.0)));
  proto->Clear();
  EXPECT_OK(DynamicValueProtoFromJson(Json(1.0), *proto));
  EXPECT_THAT(*proto,
              EqualsProto(*ParseTextOrDie(R"pb(number_value: 1.0)pb", *proto)));
  reflection->SetString(proto.get(),
                        descriptor->FindFieldByName("string_value"), "foo");
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonString>(JsonString("foo"))));
  proto->Clear();
  EXPECT_OK(DynamicValueProtoFromJson(Json(JsonString("foo")), *proto));
  EXPECT_THAT(*proto, EqualsProto(*ParseTextOrDie(R"pb(string_value: "foo")pb",
                                                  *proto)));
  reflection->MutableMessage(
      proto.get(), descriptor->FindFieldByName("list_value"), &factory);
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonArray>(IsEmpty())));
  proto->Clear();
  EXPECT_OK(DynamicValueProtoFromJson(Json(JsonArray()), *proto));
  EXPECT_THAT(*proto,
              EqualsProto(*ParseTextOrDie(R"pb(list_value: {})pb", *proto)));
  reflection->MutableMessage(
      proto.get(), descriptor->FindFieldByName("struct_value"), &factory);
  EXPECT_THAT(DynamicValueProtoToJson(*proto),
              IsOkAndHolds(VariantWith<JsonObject>(IsEmpty())));
  EXPECT_OK(DynamicValueProtoFromJson(Json(JsonObject()), *proto));
  EXPECT_THAT(*proto,
              EqualsProto(*ParseTextOrDie(R"pb(struct_value: {})pb", *proto)));
}
}
} | absl::Status CheckFieldEnumType(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
absl::string_view name) {
CEL_RETURN_IF_ERROR(
CheckFieldType(field, google::protobuf::FieldDescriptor::CPPTYPE_ENUM));
if (ABSL_PREDICT_FALSE(field->enum_type()->full_name() != name)) {
return absl::InternalError(absl::StrCat(
field->full_name(),
" has unexpected type: ", field->enum_type()->full_name()));
}
return absl::OkStatus();
} | TEST(Value, Dynamic) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Value::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory.GetPrototype(pool.FindMessageTypeByName("google.protobuf.Value"))
->New());
const auto* reflection = proto->GetReflection();
const auto* descriptor = proto->GetDescriptor();
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
reflection->SetEnumValue(proto.get(),
descriptor->FindFieldByName("null_value"), 0);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(null_value: 0)pb", *proto)));
reflection->SetBool(proto.get(), descriptor->FindFieldByName("bool_value"),
true);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonBool>(true)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(true), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(bool_value: true)pb", *proto)));
reflection->SetDouble(proto.get(),
descriptor->FindFieldByName("number_value"), 1.0);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNumber>(1.0)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(1.0), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(number_value: 1.0)pb", *proto)));
reflection->SetString(proto.get(),
descriptor->FindFieldByName("string_value"), "foo");
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonString>(JsonString("foo"))));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonString("foo")), *proto));
EXPECT_THAT(*proto, EqualsProto(*ParseTextOrDie(R"pb(string_value: "foo")pb",
*proto)));
reflection->MutableMessage(
proto.get(), descriptor->FindFieldByName("list_value"), &factory);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonArray>(IsEmpty())));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonArray()), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(list_value: {})pb", *proto)));
reflection->MutableMessage(
proto.get(), descriptor->FindFieldByName("struct_value"), &factory);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonObject>(IsEmpty())));
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonObject()), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(struct_value: {})pb", *proto)));
} |
#include "quiche/quic/core/qpack/qpack_encoder_stream_sender.h"
#include <cstddef>
#include <limits>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_instructions.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
constexpr uint64_t kMaxBytesBufferedByStream = 64 * 1024;
}
// Constructs a sender with no delegate attached yet; the instruction encoder
// is configured with the requested Huffman-encoding mode.
QpackEncoderStreamSender::QpackEncoderStreamSender(
    HuffmanEncoding huffman_encoding)
    : delegate_(nullptr), instruction_encoder_(huffman_encoding) {}
// Serializes an Insert With Name Reference instruction into the local buffer
// (sent to the peer on the next Flush()).
void QpackEncoderStreamSender::SendInsertWithNameReference(
    bool is_static, uint64_t name_index, absl::string_view value) {
  auto instruction = QpackInstructionWithValues::InsertWithNameReference(
      is_static, name_index, value);
  instruction_encoder_.Encode(instruction, &buffer_);
}
// Serializes an Insert Without Name Reference instruction into the local
// buffer (sent to the peer on the next Flush()).
void QpackEncoderStreamSender::SendInsertWithoutNameReference(
    absl::string_view name, absl::string_view value) {
  auto instruction =
      QpackInstructionWithValues::InsertWithoutNameReference(name, value);
  instruction_encoder_.Encode(instruction, &buffer_);
}
// Serializes a Duplicate instruction for the dynamic table entry at `index`
// into the local buffer.
void QpackEncoderStreamSender::SendDuplicate(uint64_t index) {
  auto instruction = QpackInstructionWithValues::Duplicate(index);
  instruction_encoder_.Encode(instruction, &buffer_);
}
// Serializes a Set Dynamic Table Capacity instruction into the local buffer.
void QpackEncoderStreamSender::SendSetDynamicTableCapacity(uint64_t capacity) {
  auto instruction = QpackInstructionWithValues::SetDynamicTableCapacity(capacity);
  instruction_encoder_.Encode(instruction, &buffer_);
}
// Returns true when a delegate is attached and the total of already-buffered
// delegate bytes plus locally buffered bytes stays within the cap.
bool QpackEncoderStreamSender::CanWrite() const {
  if (delegate_ == nullptr) {
    return false;
  }
  return delegate_->NumBytesBuffered() + buffer_.size() <=
         kMaxBytesBufferedByStream;
}
// Hands any locally buffered instruction bytes to the delegate and empties
// the buffer; a no-op when nothing is buffered.
void QpackEncoderStreamSender::Flush() {
  if (!buffer_.empty()) {
    delegate_->WriteStreamData(buffer_);
    buffer_.clear();
  }
}
} | #include "quiche/quic/core/qpack/qpack_encoder_stream_sender.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::Eq;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
// Parameterized fixture: the bool parameter selects whether Huffman encoding
// is disabled for the sender under test. A strict mock delegate receives the
// flushed stream data.
class QpackEncoderStreamSenderTest : public QuicTestWithParam<bool> {
 protected:
  QpackEncoderStreamSenderTest() : stream_(HuffmanEncoding()) {
    stream_.set_qpack_stream_sender_delegate(&delegate_);
  }
  ~QpackEncoderStreamSenderTest() override = default;
  // True when this instance runs with Huffman encoding disabled.
  bool DisableHuffmanEncoding() { return GetParam(); }
  // Maps the test parameter onto the HuffmanEncoding enum.
  HuffmanEncoding HuffmanEncoding() {
    return DisableHuffmanEncoding() ? HuffmanEncoding::kDisabled
                                    : HuffmanEncoding::kEnabled;
  }
  StrictMock<MockQpackStreamSenderDelegate> delegate_;
  QpackEncoderStreamSender stream_;
};
INSTANTIATE_TEST_SUITE_P(DisableHuffmanEncoding, QpackEncoderStreamSenderTest,
testing::Values(false, true));
TEST_P(QpackEncoderStreamSenderTest, InsertWithNameReference) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("c500", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(true, 5, "");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(absl::HexStringToBytes("c203666f6f", &expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes("c28294e7", &expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(true, 2, "foo");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("bf4a03626172", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(false, 137, "bar");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes(
"aa7f005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a",
&expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(false, 42, std::string(127, 'Z'));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, InsertWithoutNameReference) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("4000", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("", "");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(
absl::HexStringToBytes("43666f6f03666f6f", &expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes("6294e78294e7", &expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("foo", "foo");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(
absl::HexStringToBytes("4362617203626172", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("bar", "bar");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes(
"5f005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a7f"
"005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a",
&expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference(std::string(31, 'Z'),
std::string(127, 'Z'));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, Duplicate) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("11", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendDuplicate(17);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("1fd503", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendDuplicate(500);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, SetDynamicTableCapacity) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("31", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendSetDynamicTableCapacity(17);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
ASSERT_TRUE(absl::HexStringToBytes("3fd503", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendSetDynamicTableCapacity(500);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
TEST_P(QpackEncoderStreamSenderTest, Coalesce) {
stream_.SendInsertWithNameReference(true, 5, "");
stream_.SendInsertWithNameReference(true, 2, "foo");
stream_.SendInsertWithoutNameReference("foo", "foo");
stream_.SendDuplicate(17);
std::string expected_encoded_data;
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c203666f6f"
"43666f6f03666f6f"
"11",
&expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c28294e7"
"6294e78294e7"
"11",
&expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
TEST_P(QpackEncoderStreamSenderTest, FlushEmpty) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
}
}
} | void QpackEncoderStreamSender::SendInsertWithoutNameReference(
absl::string_view name, absl::string_view value) {
instruction_encoder_.Encode(
QpackInstructionWithValues::InsertWithoutNameReference(name, value),
&buffer_);
} | TEST_P(QpackEncoderStreamSenderTest, InsertWithoutNameReference) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("4000", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("", "");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(
absl::HexStringToBytes("43666f6f03666f6f", &expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes("6294e78294e7", &expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("foo", "foo");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(
absl::HexStringToBytes("4362617203626172", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("bar", "bar");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes(
"5f005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a7f"
"005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a",
&expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference(std::string(31, 'Z'),
std::string(127, 'Z'));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, Coalesce) {
stream_.SendInsertWithNameReference(true, 5, "");
stream_.SendInsertWithNameReference(true, 2, "foo");
stream_.SendInsertWithoutNameReference("foo", "foo");
stream_.SendDuplicate(17);
std::string expected_encoded_data;
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c203666f6f"
"43666f6f03666f6f"
"11",
&expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c28294e7"
"6294e78294e7"
"11",
&expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
} |
#include "arolla/expr/optimization/peephole_optimizations/const_with_shape.h"
#include <initializer_list>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
// A rewrite pair: |from_op| is the (possibly lowered/internal) operator name
// matched inside an expanded-constant expression; |to_op| is the operator to
// apply to the unexpanded scalar instead.
struct OpRecord {
  const char* const from_op;
  const char* const to_op;
};
// Unary pointwise operators for which
// op(const_with_shape(shape, x)) == const_with_shape(shape, op(x)).
constexpr std::initializer_list<OpRecord> kUnaryPointwiseOps = {
    {"bool.logical_not", "bool.logical_not"},
    {"core.has._array", "core.has"},
    {"core.has._optional", "core.has"},
    {"core.presence_not._builtin", "core.presence_not"},
    {"core.to_bool", "core.to_bool"},
    {"core.to_float32", "core.to_float32"},
    {"core.to_float64", "core.to_float64"},
    {"core.to_int32", "core.to_int32"},
    {"core.to_int64", "core.to_int64"},
    {"core.to_optional._scalar", "core.to_optional"},
    {"core.to_uint64", "core.to_uint64"},
    {"math.abs", "math.abs"},
    {"math.ceil", "math.ceil"},
    {"math.exp", "math.exp"},
    {"math.expm1", "math.expm1"},
    {"math.floor", "math.floor"},
    {"math.is_finite", "math.is_finite"},
    {"math.is_inf", "math.is_inf"},
    {"math.is_nan", "math.is_nan"},
    {"math.log", "math.log"},
    {"math.log10", "math.log10"},
    {"math.log1p", "math.log1p"},
    {"math.log2", "math.log2"},
    {"math.logit", "math.logit"},
    {"math.neg", "math.neg"},
    {"math.pos", "math.pos"},
    {"math.round", "math.round"},
    {"math.sigmoid", "math.sigmoid"},
    {"math.sign", "math.sign"},
};
// Binary pointwise operators for which
// op(const_with_shape(shape, a), const_with_shape(shape, b)) ==
// const_with_shape(shape, op(a, b)).
constexpr std::initializer_list<OpRecord> kBinaryPointwiseOps = {
    {"bool.equal", "bool.equal"},
    {"bool.less", "bool.less"},
    {"bool.less_equal", "bool.less_equal"},
    {"bool.logical_and", "bool.logical_and"},
    {"bool.logical_or", "bool.logical_or"},
    {"bool.not_equal", "bool.not_equal"},
    {"core.equal", "core.equal"},
    {"core.less", "core.less"},
    {"core.less_equal", "core.less_equal"},
    {"core.not_equal", "core.not_equal"},
    {"core.presence_and", "core.presence_and"},
    {"core.presence_or", "core.presence_or"},
    {"math._pow", "math.pow"},
    {"math.add", "math.add"},
    {"math.divide", "math.divide"},
    {"math.floordiv", "math.floordiv"},
    {"math.fmod", "math.fmod"},
    {"math.max", "math.max"},
    {"math.min", "math.min"},
    {"math.mod", "math.mod"},
    {"math.multiply", "math.multiply"},
    {"math.subtract", "math.subtract"},
};
// For every entry of kUnaryPointwiseOps, registers the pattern rewrite
//   from_op(core.const_with_shape._array_shape(shape, value))
//     -> core.const_with_shape(shape, to_op(value))
// i.e. applies the pointwise operator to the scalar before broadcasting,
// instead of to the already-expanded array.
absl::Status AddUnaryPointwiseOpOptimizations(
    PeepholeOptimizationPack& optimizations) {
  ExprNodePtr value = Placeholder("value");
  ExprNodePtr shape = Placeholder("shape");
  for (const auto& [from_op, to_op] : kUnaryPointwiseOps) {
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference(from_op,
                        {CallOpReference("core.const_with_shape._array_shape",
                                         {shape, value})}));
    ASSIGN_OR_RETURN(ExprNodePtr to,
                     CallOpReference("core.const_with_shape",
                                     {shape, CallOpReference(to_op, {value})}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(from, to));
  }
  return absl::OkStatus();
}
// Returns true iff the node's QType is a scalar type once optionality is
// stripped (i.e. T or OPTIONAL_T for scalar T).
bool IsBaseQType(const ExprNodePtr& node) {
  const auto decayed_qtype = DecayOptionalQType(node->qtype());
  return IsScalarQType(decayed_qtype);
}
// For every entry of kBinaryPointwiseOps, registers three pattern rewrites
// that all target
//   core.const_with_shape(shape, to_op(a, b)):
//   1. both arguments expanded with const_with_shape._array_shape;
//   2. only the left argument expanded (right must be a scalar/optional
//      scalar, enforced by the IsBaseQType predicate on "b");
//   3. only the right argument expanded (symmetric, predicate on "a").
absl::Status AddBinaryPointwiseOpOptimizations(
    PeepholeOptimizationPack& optimizations) {
  ExprNodePtr a = Placeholder("a");
  ExprNodePtr b = Placeholder("b");
  ExprNodePtr shape = Placeholder("shape");
  for (const auto& [from_op, to_op] : kBinaryPointwiseOps) {
    ASSIGN_OR_RETURN(ExprNodePtr to,
                     CallOpReference("core.const_with_shape",
                                     {shape, CallOpReference(to_op, {a, b})}));
    ASSIGN_OR_RETURN(
        ExprNodePtr expanded_a,
        CallOpReference("core.const_with_shape._array_shape", {shape, a}));
    ASSIGN_OR_RETURN(
        ExprNodePtr expanded_b,
        CallOpReference("core.const_with_shape._array_shape", {shape, b}));
    {
      // Case 1: op(expand(a), expand(b)).
      ASSIGN_OR_RETURN(ExprNodePtr from,
                       CallOpReference(from_op, {expanded_a, expanded_b}));
      ASSIGN_OR_RETURN(
          optimizations.emplace_back(),
          PeepholeOptimization::CreatePatternOptimization(from, to));
    }
    {
      // Case 2: op(expand(a), b) with scalar b.
      ASSIGN_OR_RETURN(ExprNodePtr from,
                       CallOpReference(from_op, {expanded_a, b}));
      ASSIGN_OR_RETURN(optimizations.emplace_back(),
                       PeepholeOptimization::CreatePatternOptimization(
                           from, to, {{"b", IsBaseQType}}));
    }
    {
      // Case 3: op(a, expand(b)) with scalar a.
      ASSIGN_OR_RETURN(ExprNodePtr from,
                       CallOpReference(from_op, {a, expanded_b}));
      ASSIGN_OR_RETURN(optimizations.emplace_back(),
                       PeepholeOptimization::CreatePatternOptimization(
                           from, to, {{"a", IsBaseQType}}));
    }
  }
  return absl::OkStatus();
}
// Registers rewrites that recover the shape operand directly:
//   1. core._array_shape_of(core.has._array(const_with_shape(shape, a)))
//        -> shape
//   2. core._array_shape_of(const_with_shape(shape, a)) -> shape
// Pattern 1 is registered in addition to 2 because the has._array wrapper
// would otherwise keep the expansion alive.
absl::Status AddArrayShapeOfOptimizations(
    PeepholeOptimizationPack& optimizations) {
  ExprNodePtr a = Placeholder("a");
  ExprNodePtr shape = Placeholder("shape");
  {
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference(
            "core._array_shape_of",
            {CallOpReference(
                "core.has._array",
                {CallOpReference("core.const_with_shape._array_shape",
                                 {shape, a})})}));
    ASSIGN_OR_RETURN(
        optimizations.emplace_back(),
        PeepholeOptimization::CreatePatternOptimization(from, shape));
  }
  {
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference("core._array_shape_of",
                        {CallOpReference("core.const_with_shape._array_shape",
                                         {shape, a})}));
    ASSIGN_OR_RETURN(
        optimizations.emplace_back(),
        PeepholeOptimization::CreatePatternOptimization(from, shape));
  }
  return absl::OkStatus();
}
}
// Builds the full set of const-with-shape peephole optimizations:
// shape-of recovery plus unary and binary pointwise-op rewrites.
absl::StatusOr<PeepholeOptimizationPack> ConstWithShapeOptimizations() {
  PeepholeOptimizationPack pack;
  RETURN_IF_ERROR(AddArrayShapeOfOptimizations(pack));
  RETURN_IF_ERROR(AddUnaryPointwiseOpOptimizations(pack));
  RETURN_IF_ERROR(AddBinaryPointwiseOpOptimizations(pack));
  return pack;
}
} | #include "arolla/expr/optimization/peephole_optimizations/const_with_shape.h"
#include <memory>
#include <optional>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
class ConstWithShapeOptimizationsTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_OK(InitArolla());
ASSERT_OK_AND_ASSIGN(
optimizer_, CreatePeepholeOptimizer({ConstWithShapeOptimizations}));
GetDenseArrayQType<float>();
GetDenseArrayQType<Unit>();
}
absl::StatusOr<ExprNodePtr> ApplyOptimizer(
absl::StatusOr<ExprNodePtr> status_or_expr) const {
ASSIGN_OR_RETURN(auto expr, ToLowest(status_or_expr));
return ToLowest(optimizer_->ApplyToNode(expr));
}
absl::StatusOr<ExprNodePtr> ToLowest(
const absl::StatusOr<ExprNodePtr>& status_or_expr) const {
if (!status_or_expr.ok()) {
return std::move(status_or_expr).status();
}
return ::arolla::expr::ToLowest(*status_or_expr);
}
std::unique_ptr<PeepholeOptimizer> optimizer_;
};
TEST_F(ConstWithShapeOptimizationsTest, UnaryPointwiseOpOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"math.exp", {CallOp("core.const_with_shape", {shape, x_plus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape",
{shape, CallOp("math.exp", {x_plus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.has", {CallOp("core.const_with_shape", {shape, x_plus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape", {shape, Literal(Unit{})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
TEST_F(ConstWithShapeOptimizationsTest, BinaryPointwiseOpOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_minus_y, CallOp("math.subtract", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.equal",
{CallOp("core.const_with_shape", {shape, x_plus_y}),
CallOp("core.const_with_shape", {shape, x_minus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape",
{shape, CallOp("core.equal", {x_plus_y, x_minus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
TEST_F(ConstWithShapeOptimizationsTest, BinaryOpWithConstantOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalQType<float>()));
ASSERT_OK_AND_ASSIGN(
auto y, WithQTypeAnnotation(Leaf("y"), GetOptionalQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_minus_y, CallOp("math.subtract", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(
CallOp("core.const_with_shape",
{shape, CallOp("core.presence_or", {x_plus_y, x_minus_y})})));
{
SCOPED_TRACE("left expanded, right is not expanded");
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.presence_or",
{CallOp("core.const_with_shape", {shape, x_plus_y}), x_minus_y})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
SCOPED_TRACE("left is not expanded, right is expanded");
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.presence_or",
{x_plus_y, CallOp("core.const_with_shape", {shape, x_minus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.shape_of", {CallOp("core.has", {CallOp("core.const_with_shape",
{shape, x_plus_y})})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
}
TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizationsForPresence) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.shape_of",
{CallOp("core.const_with_shape",
{shape, Literal<OptionalUnit>(std::nullopt)})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
}
}
} | absl::Status AddArrayShapeOfOptimizations(
PeepholeOptimizationPack& optimizations) {
ExprNodePtr a = Placeholder("a");
ExprNodePtr shape = Placeholder("shape");
{
ASSIGN_OR_RETURN(
ExprNodePtr from,
CallOpReference(
"core._array_shape_of",
{CallOpReference(
"core.has._array",
{CallOpReference("core.const_with_shape._array_shape",
{shape, a})})}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, shape));
}
{
ASSIGN_OR_RETURN(
ExprNodePtr from,
CallOpReference("core._array_shape_of",
{CallOpReference("core.const_with_shape._array_shape",
{shape, a})}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, shape));
}
return absl::OkStatus();
} | TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.shape_of", {CallOp("core.has", {CallOp("core.const_with_shape",
{shape, x_plus_y})})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
}
TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizationsForPresence) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.shape_of",
{CallOp("core.const_with_shape",
{shape, Literal<OptionalUnit>(std::nullopt)})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
} |
#include "tensorflow/core/data/service/dispatcher_state.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/journal.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
// Default-constructs the state with no pre-configured worker addresses.
DispatcherState::DispatcherState()
    : worker_index_resolver_(std::vector<std::string>{}) {}

// Constructs the state, seeding the worker index resolver with the worker
// addresses listed in the dispatcher config.
DispatcherState::DispatcherState(
    const experimental::DispatcherConfig& dispatcher_config)
    : worker_index_resolver_(dispatcher_config.worker_addresses()) {}
// Applies one journaled update to the in-memory state by dispatching on the
// update's oneof case. Every recognized update type succeeds; only an unset
// update type yields an error.
Status DispatcherState::Apply(const Update& update) {
  switch (update.update_type_case()) {
    case Update::kRegisterDataset:
      RegisterDataset(update.register_dataset());
      break;
    case Update::kRegisterWorker:
      RegisterWorker(update.register_worker());
      break;
    case Update::kCreateJob:
      CreateJob(update.create_job());
      break;
    case Update::kCreateIteration:
      CreateIteration(update.create_iteration());
      break;
    case Update::kProduceSplit:
      ProduceSplit(update.produce_split());
      break;
    case Update::kAcquireIterationClient:
      AcquireIterationClient(update.acquire_iteration_client());
      break;
    case Update::kReleaseIterationClient:
      ReleaseIterationClient(update.release_iteration_client());
      break;
    case Update::kGarbageCollectIteration:
      GarbageCollectIteration(update.garbage_collect_iteration());
      break;
    case Update::kRemoveTask:
      RemoveTask(update.remove_task());
      break;
    case Update::kCreatePendingTask:
      CreatePendingTask(update.create_pending_task());
      break;
    case Update::kClientHeartbeat:
      ClientHeartbeat(update.client_heartbeat());
      break;
    case Update::kCreateTask:
      CreateTask(update.create_task());
      break;
    case Update::kFinishTask:
      FinishTask(update.finish_task());
      break;
    case Update::kSnapshot:
      Snapshot(update.snapshot());
      break;
    case Update::kCompressionDisabledAtRuntime:
      CompressionDisabledAtRuntime(update.compression_disabled_at_runtime());
      break;
    case Update::UPDATE_TYPE_NOT_SET:
      // A journal record with no payload indicates a corrupt/incompatible
      // journal entry rather than a valid no-op.
      return errors::Internal("Update type not set.");
  }
  return absl::OkStatus();
}
void DispatcherState::RegisterDataset(
const RegisterDatasetUpdate& register_dataset) {
std::string dataset_id = register_dataset.dataset_id();
auto dataset =
std::make_shared<Dataset>(dataset_id, register_dataset.metadata());
DCHECK(!datasets_by_id_.contains(dataset_id));
datasets_by_id_[dataset_id] = dataset;
UpdateNextAvailableDatasetId();
}
// Registers a worker by address: records the worker object, creates an
// (initially empty) per-worker task map, and makes the address resolvable
// to a worker index.
void DispatcherState::RegisterWorker(
    const RegisterWorkerUpdate& register_worker) {
  const std::string& address = register_worker.worker_address();
  DCHECK(!workers_.contains(address));
  workers_[address] = std::make_shared<Worker>(register_worker);
  tasks_by_worker_[address] =
      absl::flat_hash_map<int64_t, std::shared_ptr<Task>>();
  worker_index_resolver_.AddWorker(address);
}
// Creates a job and indexes it both by id and by name. Keeps the
// next-available job id counter ahead of every id seen in the journal.
void DispatcherState::CreateJob(const CreateJobUpdate& create_job) {
  const int64_t job_id = create_job.job_id();
  const std::string& job_name = create_job.job_name();
  // num_consumers is only meaningful when the oneof field is set.
  std::optional<int64_t> num_consumers;
  if (create_job.optional_num_consumers_case() ==
      CreateJobUpdate::kNumConsumers) {
    num_consumers = create_job.num_consumers();
  }
  DCHECK(!jobs_by_id_.contains(job_id));
  DCHECK(!jobs_by_name_.contains(job_name));
  auto job = std::make_shared<Job>(
      job_id, create_job.dataset_id(), create_job.processing_mode_def(),
      job_name, num_consumers, create_job.use_cross_trainer_cache(),
      create_job.target_workers());
  jobs_by_id_[job_id] = job;
  jobs_by_name_[job_name] = job;
  next_available_job_id_ = std::max(next_available_job_id_, job_id + 1);
}
// Looks up a job by id. On success stores it in `job` and returns OK;
// otherwise returns NotFound and leaves `job` untouched.
Status DispatcherState::JobFromId(int64_t job_id,
                                  std::shared_ptr<const Job>& job) const {
  const auto it = jobs_by_id_.find(job_id);
  if (it != jobs_by_id_.end()) {
    job = it->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Job with id ", job_id, " not found");
}
// Looks up a job by name. On success stores it in `job` and returns OK;
// otherwise returns NotFound and leaves `job` untouched.
Status DispatcherState::JobByName(const std::string& job_name,
                                  std::shared_ptr<const Job>& job) const {
  const auto it = jobs_by_name_.find(job_name);
  if (it != jobs_by_name_.end()) {
    job = it->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Job with name ", job_name, " not found");
}
// Creates an iteration of an existing job, indexing it by iteration id and
// by (job name, repetition) key. Re-using a key is only expected when the
// previous iteration under that key was garbage collected (see DCHECK).
void DispatcherState::CreateIteration(
    const CreateIterationUpdate& create_iteration) {
  int64_t iteration_id = create_iteration.iteration_id();
  int64_t job_id = create_iteration.job_id();
  DCHECK(jobs_by_id_.contains(job_id));
  auto& job = jobs_by_id_[job_id];
  DCHECK(job);
  IterationKey iteration_key(job->job_name, create_iteration.repetition());
  auto iteration = std::make_shared<Iteration>(
      iteration_id, iteration_key, create_iteration.num_split_providers(), job);
  DCHECK(!iterations_.contains(iteration_id));
  iterations_[iteration_id] = iteration;
  // Seed an empty task list so later lookups for this iteration succeed.
  tasks_by_iteration_[iteration_id] = std::vector<std::shared_ptr<Task>>();
  DCHECK(!iterations_by_key_.contains(iteration_key) ||
         iterations_by_key_[iteration_key]->garbage_collected);
  iterations_by_key_[iteration_key] = iteration;
  // Keep the id counter ahead of every id seen in the journal.
  next_available_iteration_id_ =
      std::max(next_available_iteration_id_, iteration_id + 1);
}
// Records split production progress for one split provider of an iteration
// with distributed-epoch state: either advances the split index within the
// current repetition, or — when the provider reported end-of-data — starts
// the next repetition at index 0.
void DispatcherState::ProduceSplit(const ProduceSplitUpdate& produce_split) {
  std::shared_ptr<Iteration> iteration =
      iterations_[produce_split.iteration_id()];
  DCHECK(iteration->distributed_epoch_state.has_value());
  DistributedEpochState& state = iteration->distributed_epoch_state.value();
  int64_t provider_index = produce_split.split_provider_index();
  // Repetitions only move forward.
  DCHECK_GE(produce_split.repetition(), state.repetitions[provider_index]);
  state.repetitions[provider_index] = produce_split.repetition();
  if (produce_split.finished()) {
    // End of the provider's data for this repetition: begin the next
    // repetition from split index 0.
    state.repetitions[provider_index]++;
    state.indices[provider_index] = 0;
    return;
  }
  state.indices[provider_index]++;
}
// Associates a new client id with its iteration and bumps the iteration's
// client count. operator[] deliberately creates the slot for the client id;
// the slot must not already hold an iteration (enforced by DCHECK).
void DispatcherState::AcquireIterationClient(
    const AcquireIterationClientUpdate& acquire_iteration_client) {
  int64_t iteration_client_id = acquire_iteration_client.iteration_client_id();
  std::shared_ptr<Iteration>& iteration =
      iterations_for_client_ids_[iteration_client_id];
  DCHECK(!iteration);
  iteration = iterations_[acquire_iteration_client.iteration_id()];
  DCHECK(iteration);
  iteration->num_clients++;
  // Keep the client-id counter ahead of every id seen in the journal.
  next_available_iteration_client_id_ =
      std::max(next_available_iteration_client_id_, iteration_client_id + 1);
}
// Dissociates a client id from its iteration: decrements the iteration's
// client count, records the release time (presumably consulted when
// deciding to garbage-collect idle iterations — confirm against the
// garbage collector), and removes the client-id mapping.
void DispatcherState::ReleaseIterationClient(
    const ReleaseIterationClientUpdate& release_iteration_client) {
  int64_t iteration_client_id = release_iteration_client.iteration_client_id();
  std::shared_ptr<Iteration>& iteration =
      iterations_for_client_ids_[iteration_client_id];
  DCHECK(iteration);
  iteration->num_clients--;
  DCHECK_GE(iteration->num_clients, 0);
  iteration->last_client_released_micros =
      release_iteration_client.time_micros();
  iterations_for_client_ids_.erase(iteration_client_id);
}
void DispatcherState::GarbageCollectIteration(
const GarbageCollectIterationUpdate& garbage_collect_iteration) {
int64_t iteration_id = garbage_collect_iteration.iteration_id();
for (auto& task : tasks_by_iteration_[iteration_id]) {
task->finished = true;
tasks_by_worker_[task->worker_address].erase(task->task_id);
}
iterations_[iteration_id]->finished = true;
iterations_[iteration_id]->garbage_collected = true;
}
// Removes a task entirely: marks it removed, unlinks it from its iteration's
// task list, its worker's task map, and the global task map.
void DispatcherState::RemoveTask(const RemoveTaskUpdate& remove_task) {
  // Copy the shared_ptr by value rather than holding a reference into
  // `tasks_`: the map entry is erased below, and a reference into the map
  // would dangle before the final `task->worker_address` accesses.
  std::shared_ptr<Task> task = tasks_[remove_task.task_id()];
  DCHECK(task);
  task->removed = true;
  auto& tasks_for_iteration =
      tasks_by_iteration_[task->iteration->iteration_id];
  // Erase the (single) matching entry from the iteration's task list.
  for (auto it = tasks_for_iteration.begin(); it != tasks_for_iteration.end();
       ++it) {
    if ((*it)->task_id == task->task_id) {
      tasks_for_iteration.erase(it);
      break;
    }
  }
  tasks_by_worker_[task->worker_address].erase(task->task_id);
  tasks_.erase(task->task_id);
  VLOG(1) << "Removed task " << remove_task.task_id() << " from worker "
          << task->worker_address;
}
// Creates a task in the pending state: it is queued on the iteration's
// pending_tasks with the round it should start at, rather than added to the
// active task list, but is immediately indexed under its worker. Promotion
// to active happens in ClientHeartbeat once all consumers accept it.
void DispatcherState::CreatePendingTask(
    const CreatePendingTaskUpdate& create_pending_task) {
  int64_t task_id = create_pending_task.task_id();
  // operator[] creates the slot; it must be empty (no duplicate task ids).
  auto& task = tasks_[task_id];
  DCHECK_EQ(task, nullptr);
  auto& iteration = iterations_[create_pending_task.iteration_id()];
  DCHECK_NE(iteration, nullptr);
  task = std::make_shared<Task>(create_pending_task, iteration);
  iteration->pending_tasks.emplace(task, create_pending_task.starting_round());
  tasks_by_worker_[create_pending_task.worker_address()][task->task_id] = task;
  // Keep the task-id counter ahead of every id seen in the journal.
  next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
// Processes a client's accept/reject response for the front pending task of
// the iteration the client belongs to. A rejection invalidates all previous
// acceptances (every consumer must re-accept at the new target round); once
// every consumer has accepted, the task is promoted from pending to active.
void DispatcherState::ClientHeartbeat(
    const ClientHeartbeatUpdate& client_heartbeat) {
  int64_t iteration_client_id = client_heartbeat.iteration_client_id();
  auto& iteration = iterations_for_client_ids_[iteration_client_id];
  DCHECK(!iteration->pending_tasks.empty());
  auto& task = iteration->pending_tasks.front();
  if (client_heartbeat.has_task_rejected()) {
    // Reset consensus: clear accumulated acceptances and retarget the round.
    task.failures++;
    task.ready_consumers.clear();
    task.target_round = client_heartbeat.task_rejected().new_target_round();
  }
  if (client_heartbeat.task_accepted()) {
    task.ready_consumers.insert(iteration_client_id);
    if (task.ready_consumers.size() == iteration->job->num_consumers.value()) {
      // All consumers agreed on the target round: activate the task.
      VLOG(1) << "Promoting task " << task.task->task_id
              << " from pending to active";
      task.task->starting_round = task.target_round;
      tasks_by_iteration_[iteration->iteration_id].push_back(task.task);
      iteration->pending_tasks.pop();
    }
  }
}
// Creates an active task for an iteration and indexes it by task id, by
// iteration, and by worker address.
void DispatcherState::CreateTask(const CreateTaskUpdate& create_task) {
  int64_t task_id = create_task.task_id();
  // operator[] creates the slot; it must be empty (no duplicate task ids).
  auto& task = tasks_[task_id];
  DCHECK_EQ(task, nullptr);
  auto& iteration = iterations_[create_task.iteration_id()];
  DCHECK_NE(iteration, nullptr);
  task = std::make_shared<Task>(create_task, iteration);
  tasks_by_iteration_[create_task.iteration_id()].push_back(task);
  tasks_by_worker_[create_task.worker_address()][task->task_id] = task;
  // Keep the task-id counter ahead of every id seen in the journal.
  next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
// Applies a journaled FinishTask update: marks the task finished, removes it
// from its worker's active-task index, and marks the owning iteration
// finished once every one of its tasks has finished.
void DispatcherState::FinishTask(const FinishTaskUpdate& finish_task) {
  VLOG(2) << "Marking task " << finish_task.task_id() << " as finished";
  int64_t task_id = finish_task.task_id();
  auto& task = tasks_[task_id];
  DCHECK(task != nullptr);
  task->finished = true;
  tasks_by_worker_[task->worker_address].erase(task->task_id);
  bool all_finished = true;
  for (const auto& task_for_iteration :
       tasks_by_iteration_[task->iteration->iteration_id]) {
    if (!task_for_iteration->finished) {
      all_finished = false;
      break;  // One unfinished task already decides the answer.
    }
  }
  VLOG(3) << "Iteration " << task->iteration->iteration_id
          << " finished: " << all_finished;
  iterations_[task->iteration->iteration_id]->finished = all_finished;
}
// Returns the next unused dataset id, rendered as a decimal string.
std::string DispatcherState::NextAvailableDatasetId() const {
  const std::string id = absl::StrCat(next_available_dataset_id_);
  return id;
}
// Advances the numeric id counter past every id already taken by a
// registered dataset (explicitly-named datasets may occupy numeric ids).
void DispatcherState::UpdateNextAvailableDatasetId() {
  for (; datasets_by_id_.contains(absl::StrCat(next_available_dataset_id_));
       ++next_available_dataset_id_) {
  }
}
// Looks up a dataset by id. On success stores it in `dataset` and returns
// OK; otherwise returns NotFound and leaves `dataset` untouched.
Status DispatcherState::DatasetFromId(
    const std::string& id, std::shared_ptr<const Dataset>& dataset) const {
  const auto iter = datasets_by_id_.find(id);
  if (iter != datasets_by_id_.end()) {
    dataset = iter->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Dataset id ", id, " not found");
}
// Looks up a worker by address. On success stores it in `worker` and
// returns OK; otherwise returns NotFound and leaves `worker` untouched.
Status DispatcherState::WorkerFromAddress(
    const std::string& address, std::shared_ptr<const Worker>& worker) const {
  const auto iter = workers_.find(address);
  if (iter != workers_.end()) {
    worker = iter->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Worker with address ", address, " not found.");
}
// Returns a snapshot of all registered workers, in map-iteration order.
std::vector<std::shared_ptr<const DispatcherState::Worker>>
DispatcherState::ListWorkers() const {
  std::vector<std::shared_ptr<const Worker>> result;
  result.reserve(workers_.size());
  for (const auto& [address, worker] : workers_) {
    result.push_back(worker);
  }
  return result;
}
// Returns a snapshot of all known iterations, in map-iteration order.
std::vector<std::shared_ptr<const DispatcherState::Iteration>>
DispatcherState::ListIterations() const {
  std::vector<std::shared_ptr<const DispatcherState::Iteration>> result;
  result.reserve(iterations_.size());
  for (const auto& [iteration_id, iteration] : iterations_) {
    result.push_back(iteration);
  }
  return result;
}
// Looks up an iteration by id. On success stores it in `iteration` and
// returns OK; otherwise returns NotFound and leaves `iteration` untouched.
Status DispatcherState::IterationFromId(
    int64_t id, std::shared_ptr<const Iteration>& iteration) const {
  const auto iter = iterations_.find(id);
  if (iter != iterations_.end()) {
    iteration = iter->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Iteration id ", id, " not found");
}
// Looks up an iteration by its (job name, repetition) key. On success stores
// it in `iteration` and returns OK; otherwise returns NotFound.
Status DispatcherState::IterationByKey(
    IterationKey iteration_key,
    std::shared_ptr<const Iteration>& iteration) const {
  const auto iter = iterations_by_key_.find(iteration_key);
  if (iter != iterations_by_key_.end()) {
    iteration = iter->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Iteration key ", iteration_key.DebugString(),
                          " not found");
}
// Returns the smallest job id not yet used by any created job.
int64_t DispatcherState::NextAvailableJobId() const {
  return next_available_job_id_;
}
// Returns the smallest iteration id not yet used by any created iteration.
int64_t DispatcherState::NextAvailableIterationId() const {
  return next_available_iteration_id_;
}
// Looks up the iteration registered for `iteration_client_id`. Returns
// NotFound if the client id was never acquired, or if it was released
// (released ids map to a null entry).
//
// Uses find() rather than operator[] so that looking up an unknown client id
// does not default-insert a null entry into iterations_for_client_ids_,
// which would otherwise accumulate garbage entries seen by later scans of
// the map.
Status DispatcherState::IterationForIterationClientId(
    int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration) {
  auto it = iterations_for_client_ids_.find(iteration_client_id);
  if (it == iterations_for_client_ids_.end() || !it->second) {
    return errors::NotFound("Iteration client id not found: ",
                            iteration_client_id);
  }
  iteration = it->second;
  return absl::OkStatus();
}
// Returns the client ids whose iteration exists and is not yet finished.
// Null map entries (released clients) are skipped.
std::vector<int64_t> DispatcherState::ListActiveClientIds() {
  std::vector<int64_t> client_ids;
  for (const auto& [client_id, iteration] : iterations_for_client_ids_) {
    const bool active = iteration != nullptr && !iteration->finished;
    if (active) {
      client_ids.push_back(client_id);
    }
  }
  return client_ids;
}
// Returns the smallest iteration client id not yet handed out.
int64_t DispatcherState::NextAvailableIterationClientId() const {
  return next_available_iteration_client_id_;
}
// Looks up a task by id. On success stores it in `task` and returns OK;
// otherwise returns NotFound and leaves `task` untouched.
Status DispatcherState::TaskFromId(int64_t id,
                                   std::shared_ptr<const Task>& task) const {
  const auto iter = tasks_.find(id);
  if (iter != tasks_.end()) {
    task = iter->second;
    return absl::OkStatus();
  }
  return errors::NotFound("Task ", id, " not found");
}
// Replaces `tasks` with all tasks belonging to `iteration_id`. Returns
// NotFound (leaving `tasks` untouched) if the iteration has no task list.
Status DispatcherState::TasksForIteration(
    int64_t iteration_id,
    std::vector<std::shared_ptr<const Task>>& tasks) const {
  const auto iter = tasks_by_iteration_.find(iteration_id);
  if (iter == tasks_by_iteration_.end()) {
    return errors::NotFound("Iteration ", iteration_id, " not found");
  }
  // assign() clears the output and copies the (const-converted) pointers.
  tasks.assign(iter->second.begin(), iter->second.end());
  return absl::OkStatus();
}
// Replaces `tasks` with all tasks assigned to the worker at
// `worker_address`. Returns NotFound (with `tasks` cleared) if the worker
// has no task map.
Status DispatcherState::TasksForWorker(
    absl::string_view worker_address,
    std::vector<std::shared_ptr<const Task>>& tasks) const {
  tasks.clear();
  const auto iter = tasks_by_worker_.find(worker_address);
  if (iter == tasks_by_worker_.end()) {
    return errors::NotFound("Worker ", worker_address, " not found");
  }
  tasks.reserve(iter->second.size());
  for (const auto& [task_id, task] : iter->second) {
    tasks.push_back(task);
  }
  return absl::OkStatus();
}
// Returns the smallest task id not yet used by any created task.
int64_t DispatcherState::NextAvailableTaskId() const {
  return next_available_task_id_;
}
// Delegates validation of `worker_address` to the worker index resolver.
Status DispatcherState::ValidateWorker(absl::string_view worker_address) const {
  return worker_index_resolver_.ValidateWorker(worker_address);
}
// Returns the resolver-assigned index for `worker_address`, or the
// resolver's error status if the address is unknown.
absl::StatusOr<int64_t> DispatcherState::GetWorkerIndex(
    absl::string_view worker_address) const {
  return worker_index_resolver_.GetWorkerIndex(worker_address);
}
// Applies a journaled Snapshot update by recording the snapshot's path.
void DispatcherState::Snapshot(const SnapshotUpdate& snapshot) {
  snapshot_paths_.insert(snapshot.path());
}
// Records the journaled compression-at-runtime decision for a dataset.
// emplace() keeps the first recorded decision if one already exists.
void DispatcherState::CompressionDisabledAtRuntime(
    const CompressionDisabledAtRuntimeUpdate& compression_disabled_at_runtime) {
  compression_disabled_at_runtime_.emplace(
      compression_disabled_at_runtime.dataset_id(),
      compression_disabled_at_runtime.compression_disabled());
}
// Returns the recorded compression decision for `dataset_id`, or
// std::nullopt if no decision has been recorded.
std::optional<bool> DispatcherState::CompressionDisabledAtRuntime(
    const std::string& dataset_id) const {
  const auto iter = compression_disabled_at_runtime_.find(dataset_id);
  if (iter == compression_disabled_at_runtime_.end()) {
    return std::nullopt;
  }
  return iter->second;
}
}
} | #include "tensorflow/core/data/service/dispatcher_state.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
// Convenience aliases for the DispatcherState nested types under test.
using Dataset = DispatcherState::Dataset;
using Worker = DispatcherState::Worker;
using IterationKey = DispatcherState::IterationKey;
using Job = DispatcherState::Job;
using Iteration = DispatcherState::Iteration;
using Task = DispatcherState::Task;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::StatusIs;

// Each helper below builds a journal Update proto and applies it to `state`,
// mirroring how the dispatcher records state transitions in production.

// Registers a dataset with the given id.
Status RegisterDataset(const std::string& dataset_id, DispatcherState& state) {
  Update update;
  RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
  register_dataset->set_dataset_id(dataset_id);
  return state.Apply(update);
}

// Registers a worker at `worker_address`.
Status RegisterWorker(std::string worker_address, DispatcherState& state) {
  Update update;
  update.mutable_register_worker()->set_worker_address(worker_address);
  return state.Apply(update);
}

// Creates a job over `dataset_id` with the given id and name.
Status CreateJob(int64_t job_id, const std::string& dataset_id,
                 const std::string& job_name, DispatcherState& state) {
  Update update;
  CreateJobUpdate* create_job = update.mutable_create_job();
  create_job->set_job_id(job_id);
  create_job->set_dataset_id(dataset_id);
  create_job->set_job_name(job_name);
  return state.Apply(update);
}

// Creates a job named after `named_iteration_key`, then an iteration of it.
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
                       const IterationKey& named_iteration_key,
                       DispatcherState& state) {
  int64_t job_id = state.NextAvailableJobId();
  TF_RETURN_IF_ERROR(
      CreateJob(job_id, dataset_id, named_iteration_key.name, state));
  Update update;
  CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
  create_iteration->set_job_id(job_id);
  create_iteration->set_iteration_id(iteration_id);
  create_iteration->set_repetition(named_iteration_key.repetition);
  return state.Apply(update);
}

// Creates an iteration under a randomly-named job.
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
                       DispatcherState& state) {
  IterationKey key(absl::StrCat(random::New64()), 0);
  return CreateIteration(iteration_id, dataset_id, key, state);
}

// Acquires `iteration_client_id` for the given iteration.
Status AcquireIterationClientId(int64_t iteration_id,
                                int64_t iteration_client_id,
                                DispatcherState& state) {
  Update update;
  AcquireIterationClientUpdate* acquire_iteration_client =
      update.mutable_acquire_iteration_client();
  acquire_iteration_client->set_iteration_id(iteration_id);
  acquire_iteration_client->set_iteration_client_id(iteration_client_id);
  return state.Apply(update);
}

// Releases a previously acquired iteration client id at `release_time`.
Status ReleaseIterationClientId(int64_t iteration_client_id,
                                int64_t release_time, DispatcherState& state) {
  Update update;
  ReleaseIterationClientUpdate* release_iteration_client =
      update.mutable_release_iteration_client();
  release_iteration_client->set_iteration_client_id(iteration_client_id);
  release_iteration_client->set_time_micros(release_time);
  return state.Apply(update);
}

// Creates a task for `iteration_id` on the worker at `worker_address`.
Status CreateTask(int64_t task_id, int64_t iteration_id,
                  const std::string& worker_address, DispatcherState& state) {
  Update update;
  CreateTaskUpdate* create_task = update.mutable_create_task();
  create_task->set_task_id(task_id);
  create_task->set_iteration_id(iteration_id);
  create_task->set_worker_address(worker_address);
  return state.Apply(update);
}

// Marks the given task as finished.
Status FinishTask(int64_t task_id, DispatcherState& state) {
  Update update;
  FinishTaskUpdate* finish_task = update.mutable_finish_task();
  finish_task->set_task_id(task_id);
  return state.Apply(update);
}

// Records a snapshot rooted at `path`.
Status Snapshot(const std::string& path, DispatcherState& state) {
  Update update;
  SnapshotUpdate* snapshot = update.mutable_snapshot();
  snapshot->set_path(path);
  return state.Apply(update);
}
}  // namespace
// Registering a dataset advances the next id and yields default metadata.
TEST(DispatcherState, RegisterDataset) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t dataset_id_int;
  ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
  std::shared_ptr<const Dataset> dataset;
  TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
  EXPECT_TRUE(dataset->metadata.element_spec().empty());
  EXPECT_EQ(dataset->metadata.compression(),
            DataServiceMetadata::COMPRESSION_UNSPECIFIED);
}
// A caller-chosen (non-numeric) dataset id is stored verbatim.
TEST(DispatcherState, RegisterDatasetWithExplicitID) {
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset("dataset_id", state));
  std::shared_ptr<const Dataset> dataset;
  TF_EXPECT_OK(state.DatasetFromId("dataset_id", dataset));
  EXPECT_EQ(dataset->dataset_id, "dataset_id");
}
// Two datasets with distinct explicit ids are independently retrievable.
TEST(DispatcherState, RegisterDatasetsWithDifferentIDs) {
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset("dataset_id1", state));
  TF_EXPECT_OK(RegisterDataset("dataset_id2", state));
  std::shared_ptr<const Dataset> dataset;
  TF_EXPECT_OK(state.DatasetFromId("dataset_id1", dataset));
  EXPECT_EQ(dataset->dataset_id, "dataset_id1");
  TF_EXPECT_OK(state.DatasetFromId("dataset_id2", dataset));
  EXPECT_EQ(dataset->dataset_id, "dataset_id2");
}
// Compression metadata supplied at registration is preserved.
TEST(DispatcherState, RegisterDatasetCompression) {
  DispatcherState state;
  const std::string dataset_id = state.NextAvailableDatasetId();
  Update update;
  RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
  register_dataset->set_dataset_id(dataset_id);
  register_dataset->mutable_metadata()->set_compression(
      DataServiceMetadata::COMPRESSION_SNAPPY);
  TF_ASSERT_OK(state.Apply(update));
  {
    std::shared_ptr<const Dataset> dataset;
    TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
    EXPECT_EQ(dataset->metadata.compression(),
              DataServiceMetadata::COMPRESSION_SNAPPY);
  }
}
// Element-spec metadata supplied at registration is preserved.
TEST(DispatcherState, RegisterDatasetElementSpec) {
  DispatcherState state;
  const std::string dataset_id = state.NextAvailableDatasetId();
  Update update;
  RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
  register_dataset->set_dataset_id(dataset_id);
  register_dataset->mutable_metadata()->set_element_spec(
      "encoded_element_spec");
  TF_ASSERT_OK(state.Apply(update));
  {
    std::shared_ptr<const Dataset> dataset;
    TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
    EXPECT_EQ(dataset->metadata.element_spec(), "encoded_element_spec");
  }
}
// Looking up an unregistered dataset id yields NOT_FOUND.
TEST(DispatcherState, MissingDatasetId) {
  DispatcherState state;
  std::shared_ptr<const Dataset> dataset;
  Status s = state.DatasetFromId("missing_dataset_id", dataset);
  EXPECT_EQ(s.code(), error::NOT_FOUND);
}
// NextAvailableDatasetId advances after a registration and is stable
// (repeated calls without intervening registrations return the same id).
TEST(DispatcherState, NextAvailableDatasetId) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t dataset_id_int;
  ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  EXPECT_NE(state.NextAvailableDatasetId(), dataset_id);
  EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
  EXPECT_EQ(state.NextAvailableDatasetId(), state.NextAvailableDatasetId());
}
// A registered worker can be looked up by its address.
TEST(DispatcherState, RegisterWorker) {
  DispatcherState state;
  std::string address = "test_worker_address";
  TF_EXPECT_OK(RegisterWorker(address, state));
  std::shared_ptr<const Worker> worker;
  TF_EXPECT_OK(state.WorkerFromAddress(address, worker));
  EXPECT_EQ(worker->address, address);
}
// With a configured worker set, addresses matching configured hosts
// validate and register successfully.
TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) {
  experimental::DispatcherConfig config;
  config.add_worker_addresses("/worker/task/0");
  config.add_worker_addresses("/worker/task/1");
  config.add_worker_addresses("/worker/task/2");
  DispatcherState state(config);
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000"));
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000"));
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000"));
  TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state));
  TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state));
  TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state));
  std::shared_ptr<const Worker> worker;
  TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker));
  EXPECT_EQ(worker->address, "/worker/task/0:20000");
}
// An address outside the configured worker set fails validation; applying
// its registration is journaled but it does not become a visible worker.
TEST(DispatcherState, RegisterInvalidWorkerInFixedWorkerSet) {
  experimental::DispatcherConfig config;
  config.add_worker_addresses("/worker/task/0");
  config.add_worker_addresses("/worker/task/1");
  config.add_worker_addresses("/worker/task/2");
  DispatcherState state(config);
  EXPECT_THAT(state.ValidateWorker("localhost:20000"),
              StatusIs(error::FAILED_PRECONDITION,
                       HasSubstr("The worker's address is not configured")));
  TF_EXPECT_OK(RegisterWorker("localhost:20000", state));
  std::shared_ptr<const Worker> worker;
  EXPECT_THAT(state.WorkerFromAddress("/worker/task/0:20000", worker),
              StatusIs(error::NOT_FOUND,
                       "Worker with address /worker/task/0:20000 not found."));
}
// ListWorkers reflects the number of registered workers as they are added.
TEST(DispatcherState, ListWorkers) {
  DispatcherState state;
  std::string address_1 = "address_1";
  std::string address_2 = "address_2";
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, IsEmpty());
  }
  TF_EXPECT_OK(RegisterWorker(address_1, state));
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, SizeIs(1));
  }
  TF_EXPECT_OK(RegisterWorker(address_2, state));
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, SizeIs(2));
  }
}
// Looking up an unregistered worker address yields NOT_FOUND.
TEST(DispatcherState, MissingWorker) {
  DispatcherState state;
  std::shared_ptr<const Worker> worker;
  Status s = state.WorkerFromAddress("test_worker_address", worker);
  EXPECT_EQ(s.code(), error::NOT_FOUND);
}
// Applying an empty (typeless) journal update is an internal error.
TEST(DispatcherState, UnknownUpdate) {
  DispatcherState state;
  Update update;
  Status s = state.Apply(update);
  EXPECT_EQ(s.code(), error::INTERNAL);
}
// A created job is retrievable by name, advances the next job id, and
// defaults to no cross-trainer cache.
TEST(DispatcherState, JobName) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t job_id = state.NextAvailableJobId();
  std::string job_name = "test_name";
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state));
  std::shared_ptr<const Job> job;
  TF_EXPECT_OK(state.JobByName(job_name, job));
  EXPECT_EQ(state.NextAvailableJobId(), job_id + 1);
  EXPECT_EQ(job->dataset_id, dataset_id);
  EXPECT_FALSE(job->use_cross_trainer_cache);
}
// num_consumers and use_cross_trainer_cache from the update are stored.
TEST(DispatcherState, JobData) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t job_id = state.NextAvailableJobId();
  int64_t num_consumers = 8;
  bool use_cross_trainer_cache = true;
  TF_ASSERT_OK(RegisterDataset(dataset_id, state));
  Update update;
  CreateJobUpdate* create_job = update.mutable_create_job();
  create_job->set_job_id(job_id);
  create_job->set_dataset_id(dataset_id);
  create_job->set_num_consumers(num_consumers);
  create_job->set_use_cross_trainer_cache(use_cross_trainer_cache);
  TF_ASSERT_OK(state.Apply(update));
  std::shared_ptr<const Job> job;
  TF_ASSERT_OK(state.JobFromId(job_id, job));
  EXPECT_EQ(job->num_consumers, num_consumers);
  EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache);
}
// A task created under a cross-trainer-cache job exposes that flag through
// its iteration's job.
TEST(DispatcherState, CrossTrainerCacheTask) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  std::string worker_address = "test_worker_address";
  TF_ASSERT_OK(RegisterDataset(dataset_id, state));
  int64_t job_id = state.NextAvailableJobId();
  Update job_update;
  CreateJobUpdate* create_job = job_update.mutable_create_job();
  create_job->set_job_id(job_id);
  create_job->set_dataset_id(dataset_id);
  create_job->set_use_cross_trainer_cache(true);
  TF_ASSERT_OK(state.Apply(job_update));
  int64_t iteration_id = state.NextAvailableIterationId();
  Update iteration_update;
  CreateIterationUpdate* create_iteration =
      iteration_update.mutable_create_iteration();
  create_iteration->set_job_id(job_id);
  create_iteration->set_iteration_id(iteration_id);
  TF_ASSERT_OK(state.Apply(iteration_update));
  int64_t task_id = state.NextAvailableTaskId();
  TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
  std::shared_ptr<const Task> task;
  TF_EXPECT_OK(state.TaskFromId(task_id, task));
  EXPECT_EQ(task->iteration->iteration_id, iteration_id);
  EXPECT_EQ(task->task_id, task_id);
  EXPECT_EQ(task->worker_address, worker_address);
  EXPECT_TRUE(task->iteration->job->use_cross_trainer_cache);
}
// Creating a task indexes it by id, by iteration, and by worker, and
// advances the next task id.
TEST(DispatcherState, CreateTask) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  int64_t task_id = state.NextAvailableTaskId();
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
  EXPECT_EQ(state.NextAvailableTaskId(), task_id + 1);
  {
    std::shared_ptr<const Task> task;
    TF_EXPECT_OK(state.TaskFromId(task_id, task));
    EXPECT_EQ(task->iteration->iteration_id, iteration_id);
    EXPECT_EQ(task->task_id, task_id);
    EXPECT_EQ(task->worker_address, worker_address);
    EXPECT_FALSE(task->iteration->job->use_cross_trainer_cache);
  }
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
    EXPECT_THAT(tasks, SizeIs(1));
  }
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
    EXPECT_EQ(1, tasks.size());
  }
}
// Two tasks for one iteration both appear in TasksForIteration.
TEST(DispatcherState, CreateTasksForSameIteration) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t task_id_1 = 8;
  int64_t task_id_2 = 9;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
  TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
    EXPECT_THAT(tasks, SizeIs(2));
  }
}
// Tasks for distinct iterations are tracked independently per iteration.
TEST(DispatcherState, CreateTasksForDifferentIterations) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id_1 = 3;
  int64_t iteration_id_2 = 4;
  int64_t task_id_1 = 8;
  int64_t task_id_2 = 9;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id_1, dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id_2, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id_1, iteration_id_1, worker_address, state));
  TF_EXPECT_OK(CreateTask(task_id_2, iteration_id_2, worker_address, state));
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForIteration(iteration_id_1, tasks));
    EXPECT_THAT(tasks, SizeIs(1));
  }
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForIteration(iteration_id_2, tasks));
    EXPECT_THAT(tasks, SizeIs(1));
  }
}
// Two tasks assigned to one worker both appear in TasksForWorker.
TEST(DispatcherState, CreateTasksForSameWorker) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t task_id_1 = 8;
  int64_t task_id_2 = 9;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
  TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
    EXPECT_EQ(2, tasks.size());
  }
}
// Tasks on distinct workers are tracked independently per worker address.
TEST(DispatcherState, CreateTasksForDifferentWorkers) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t task_id_1 = 8;
  int64_t task_id_2 = 9;
  std::string worker_address_1 = "test_worker_address_1";
  std::string worker_address_2 = "test_worker_address_2";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address_1, state));
  TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address_2, state));
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForWorker(worker_address_1, tasks));
    EXPECT_EQ(1, tasks.size());
  }
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForWorker(worker_address_2, tasks));
    EXPECT_EQ(1, tasks.size());
  }
}
// A registered worker with no tasks yields an empty (OK) task list.
TEST(DispatcherState, GetTasksForWorkerEmpty) {
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterWorker(worker_address, state));
  {
    std::vector<std::shared_ptr<const Task>> tasks;
    TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
    EXPECT_EQ(0, tasks.size());
  }
}
// Finishing an iteration's only task finishes the iteration.
TEST(DispatcherState, FinishTask) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t task_id = 4;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
  TF_EXPECT_OK(FinishTask(task_id, state));
  std::shared_ptr<const Task> task;
  TF_EXPECT_OK(state.TaskFromId(task_id, task));
  EXPECT_TRUE(task->finished);
  std::shared_ptr<const Iteration> iteration;
  TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
  EXPECT_TRUE(iteration->finished);
}
// An iteration only finishes after ALL of its tasks finish.
TEST(DispatcherState, FinishMultiTaskIteration) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t task_id_1 = 4;
  int64_t task_id_2 = 5;
  std::string worker_address = "test_worker_address";
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
  TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
  TF_EXPECT_OK(FinishTask(task_id_1, state));
  {
    std::shared_ptr<const Iteration> iteration;
    TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
    EXPECT_FALSE(iteration->finished);
  }
  TF_EXPECT_OK(FinishTask(task_id_2, state));
  {
    std::shared_ptr<const Iteration> iteration;
    TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
    EXPECT_TRUE(iteration->finished);
  }
}
// Acquiring client ids increments the iteration's client count and makes
// each client id resolvable back to the iteration.
TEST(DispatcherState, AcquireIterationClientId) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t iteration_client_id_1 = 1;
  int64_t iteration_client_id_2 = 2;
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
  {
    std::shared_ptr<const Iteration> iteration;
    TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
    EXPECT_EQ(iteration->num_clients, 1);
    TF_EXPECT_OK(
        AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
    EXPECT_EQ(iteration->num_clients, 2);
  }
  {
    std::shared_ptr<const Iteration> iteration;
    TF_EXPECT_OK(
        state.IterationForIterationClientId(iteration_client_id_1, iteration));
    EXPECT_EQ(iteration->iteration_id, iteration_id);
  }
  {
    std::shared_ptr<const Iteration> iteration;
    TF_EXPECT_OK(
        state.IterationForIterationClientId(iteration_client_id_2, iteration));
    EXPECT_EQ(iteration->iteration_id, iteration_id);
  }
}
// Releasing a client id decrements the count and makes the id unresolvable.
TEST(DispatcherState, ReleaseIterationClientId) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t iteration_client_id = 6;
  int64_t release_time = 100;
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id, state));
  TF_EXPECT_OK(
      ReleaseIterationClientId(iteration_client_id, release_time, state));
  std::shared_ptr<const Iteration> iteration;
  TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
  EXPECT_EQ(iteration->num_clients, 0);
  Status s =
      state.IterationForIterationClientId(iteration_client_id, iteration);
  EXPECT_EQ(s.code(), error::NOT_FOUND);
}
// No active clients before any acquisition, and none after all releases.
TEST(DispatcherState, ListActiveClientsEmpty) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t iteration_client_id = 6;
  int64_t release_time = 100;
  DispatcherState state;
  EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id, state));
  TF_EXPECT_OK(
      ReleaseIterationClientId(iteration_client_id, release_time, state));
  EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
}
// Only unreleased client ids are reported as active.
TEST(DispatcherState, ListActiveClients) {
  std::string dataset_id = "dataset_id";
  int64_t iteration_id = 3;
  int64_t iteration_client_id_1 = 6;
  int64_t iteration_client_id_2 = 7;
  int64_t iteration_client_id_3 = 8;
  int64_t release_time = 100;
  DispatcherState state;
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
  TF_EXPECT_OK(
      ReleaseIterationClientId(iteration_client_id_2, release_time, state));
  TF_EXPECT_OK(
      AcquireIterationClientId(iteration_id, iteration_client_id_3, state));
  EXPECT_THAT(state.ListActiveClientIds(), UnorderedElementsAre(6, 8));
}
// All recorded snapshot paths are listed.
TEST(DispatcherState, ListSnapshotPaths) {
  DispatcherState state;
  absl::flat_hash_set<std::string> snapshot_paths = {"p1", "p2"};
  for (const auto& snapshot_path : snapshot_paths) {
    TF_EXPECT_OK(Snapshot(snapshot_path, state));
  }
  EXPECT_EQ(state.ListSnapshotPaths(), snapshot_paths);
}
// The registered-worker count tracks each registration.
TEST(DispatcherState, GetNumberOfRegisteredWorkers) {
  DispatcherState state;
  std::string address_1 = "address_1";
  std::string address_2 = "address_2";
  EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 0);
  TF_EXPECT_OK(RegisterWorker(address_1, state));
  EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 1);
  TF_EXPECT_OK(RegisterWorker(address_2, state));
  EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 2);
}
}
} | void DispatcherState::AcquireIterationClient(
const AcquireIterationClientUpdate& acquire_iteration_client) {
int64_t iteration_client_id = acquire_iteration_client.iteration_client_id();
std::shared_ptr<Iteration>& iteration =
iterations_for_client_ids_[iteration_client_id];
DCHECK(!iteration);
iteration = iterations_[acquire_iteration_client.iteration_id()];
DCHECK(iteration);
iteration->num_clients++;
next_available_iteration_client_id_ =
std::max(next_available_iteration_client_id_, iteration_client_id + 1);
} | TEST(DispatcherState, AcquireIterationClientId) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id_1 = 1;
int64_t iteration_client_id_2 = 2;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_EQ(iteration->num_clients, 1);
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
EXPECT_EQ(iteration->num_clients, 2);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_1, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_2, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
} |
#include "quiche/quic/core/tls_server_handshaker.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "openssl/base.h"
#include "openssl/bytestring.h"
#include "openssl/ssl.h"
#include "openssl/tls1.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_message_parser.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/crypto/proof_verifier.h"
#include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/http/http_frames.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_crypto_server_stream_base.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_time_accumulator.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_handshaker.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_hostname_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_server_stats.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
// Records `latency` in microseconds under the server histogram `stat_name`
// (range 1us..10s, 50 buckets) and logs the value at verbosity 1.
#define RECORD_LATENCY_IN_US(stat_name, latency, comment)                   \
  do {                                                                      \
    const int64_t latency_in_us = (latency).ToMicroseconds();               \
    QUIC_DVLOG(1) << "Recording " stat_name ": " << latency_in_us;          \
    QUIC_SERVER_HISTOGRAM_COUNTS(stat_name, latency_in_us, 1, 10000000, 50, \
                                 comment);                                  \
  } while (0)
namespace quic {
namespace {
// Default server port (443, the standard HTTPS/QUIC port).
// constexpr: this file-local constant is never mutated, so give it
// compile-time-constant semantics instead of a mutable global.
constexpr uint16_t kDefaultPort = 443;
}  // namespace
// Binds the handle to `handshaker` and the (unowned) `proof_source` it will
// query for certificate chains and TLS signatures.
TlsServerHandshaker::DefaultProofSourceHandle::DefaultProofSourceHandle(
    TlsServerHandshaker* handshaker, ProofSource* proof_source)
    : handshaker_(handshaker), proof_source_(proof_source) {}
// Cancels any in-flight signature computation so its callback cannot fire
// into a destroyed handle.
TlsServerHandshaker::DefaultProofSourceHandle::~DefaultProofSourceHandle() {
  CloseHandle();
}
// Detaches the handle: cancels the pending signature callback, if any, so it
// can no longer reach back into this handle or the handshaker.
void TlsServerHandshaker::DefaultProofSourceHandle::CloseHandle() {
  QUIC_DVLOG(1) << "CloseHandle. is_signature_pending="
                << (signature_callback_ != nullptr);
  if (signature_callback_ == nullptr) {
    return;
  }
  signature_callback_->Cancel();
  signature_callback_ = nullptr;
}
// Synchronously selects the certificate chain for `hostname` via the wrapped
// ProofSource, then reports the outcome to the handshaker before returning.
// Most parameters of the ProofSourceHandle interface are unused by this
// default (synchronous) implementation.
QuicAsyncStatus
TlsServerHandshaker::DefaultProofSourceHandle::SelectCertificate(
    const QuicSocketAddress& server_address,
    const QuicSocketAddress& client_address,
    const QuicConnectionId& ,
    absl::string_view , const std::string& hostname,
    absl::string_view , const std::string& ,
    std::optional<std::string> ,
    const std::vector<uint8_t>& ,
    const std::optional<std::vector<uint8_t>>& ,
    const QuicSSLConfig& ) {
  if (!handshaker_ || !proof_source_) {
    QUIC_BUG(quic_bug_10341_1)
        << "SelectCertificate called on a detached handle";
    return QUIC_FAILURE;
  }
  bool cert_matched_sni;
  quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain =
      proof_source_->GetCertChain(server_address, client_address, hostname,
                                  &cert_matched_sni);
  // Completion is reported immediately: this handle never goes async.
  handshaker_->OnSelectCertificateDone(
      true, true,
      ProofSourceHandleCallback::LocalSSLConfig{chain.get(),
                                                QuicDelayedSSLConfig()},
      absl::string_view(), cert_matched_sni);
  // A synchronous OnSelectCertificateDone must have populated the status;
  // report success defensively if it somehow did not.
  if (!handshaker_->select_cert_status().has_value()) {
    QUIC_BUG(quic_bug_12423_1)
        << "select_cert_status() has no value after a synchronous select cert";
    return QUIC_SUCCESS;
  }
  return *handshaker_->select_cert_status();
}
// Asks the ProofSource to sign `in` with `signature_algorithm`. Returns
// QUIC_PENDING if the proof source kept the callback for later, otherwise
// reports the synchronous result.
QuicAsyncStatus TlsServerHandshaker::DefaultProofSourceHandle::ComputeSignature(
    const QuicSocketAddress& server_address,
    const QuicSocketAddress& client_address, const std::string& hostname,
    uint16_t signature_algorithm, absl::string_view in,
    size_t max_signature_size) {
  if (!handshaker_ || !proof_source_) {
    QUIC_BUG(quic_bug_10341_2)
        << "ComputeSignature called on a detached handle";
    return QUIC_FAILURE;
  }
  // Only one signature computation may be outstanding at a time.
  if (signature_callback_) {
    QUIC_BUG(quic_bug_10341_3) << "ComputeSignature called while pending";
    return QUIC_FAILURE;
  }
  // Ownership of the callback passes to the proof source; we keep a raw
  // pointer so a synchronous completion (which resets signature_callback_,
  // presumably from inside DefaultSignatureCallback) can be detected below.
  signature_callback_ = new DefaultSignatureCallback(this);
  proof_source_->ComputeTlsSignature(
      server_address, client_address, hostname, signature_algorithm, in,
      std::unique_ptr<DefaultSignatureCallback>(signature_callback_));
  if (signature_callback_) {
    // Still set: the proof source has not run the callback yet.
    QUIC_DVLOG(1) << "ComputeTlsSignature is pending";
    signature_callback_->set_is_sync(false);
    return QUIC_PENDING;
  }
  bool success = handshaker_->HasValidSignature(max_signature_size);
  QUIC_DVLOG(1) << "ComputeTlsSignature completed synchronously. success:"
                << success;
  return success ? QUIC_SUCCESS : QUIC_FAILURE;
}
// Ties the ticket-decryption callback to its handshaker; the pointer is
// cleared by Run() or Cancel().
TlsServerHandshaker::DecryptCallback::DecryptCallback(
    TlsServerHandshaker* handshaker)
    : handshaker_(handshaker) {}
// Delivers the decrypted session ticket to the handshaker. A no-op if the
// callback was cancelled. When the decryption completed asynchronously, the
// handshake is resumed under the connection's context.
void TlsServerHandshaker::DecryptCallback::Run(std::vector<uint8_t> plaintext) {
  if (handshaker_ == nullptr) {
    // Cancel() was called; drop the result.
    return;
  }
  // Detach first so this callback is marked done (IsDone) for re-entrant
  // queries during AdvanceHandshakeFromCallback.
  TlsServerHandshaker* handshaker = handshaker_;
  handshaker_ = nullptr;
  handshaker->decrypted_session_ticket_ = std::move(plaintext);
  const bool is_async =
      (handshaker->expected_ssl_error() == SSL_ERROR_PENDING_TICKET);
  std::optional<QuicConnectionContextSwitcher> context_switcher;
  if (is_async) {
    context_switcher.emplace(handshaker->connection_context());
  }
  QUIC_TRACESTRING(
      absl::StrCat("TLS ticket decryption done. len(decrypted_ticket):",
                   handshaker->decrypted_session_ticket_.size()));
  if (is_async) {
    handshaker->AdvanceHandshakeFromCallback();
  }
  // This may drop the last shared_ptr reference to `this` (see
  // SessionTicketOpen), so it must be the final statement.
  handshaker->ticket_decryption_callback_ = nullptr;
}
// Detaches the callback from its handshaker so a later Run() is a no-op.
// May only be called while the callback is still attached.
void TlsServerHandshaker::DecryptCallback::Cancel() {
  QUICHE_DCHECK(handshaker_);
  handshaker_ = nullptr;
}
// Constructs the server-side TLS 1.3 handshaker: puts the SSL object into
// accept (server) mode and applies per-version and per-config knobs.
TlsServerHandshaker::TlsServerHandshaker(
    QuicSession* session, const QuicCryptoServerConfig* crypto_config)
    : TlsHandshaker(this, session),
      QuicCryptoServerStreamBase(session),
      proof_source_(crypto_config->proof_source()),
      pre_shared_key_(crypto_config->pre_shared_key()),
      crypto_negotiated_params_(new QuicCryptoNegotiatedParameters),
      tls_connection_(crypto_config->ssl_ctx(), this, session->GetSSLConfig()),
      crypto_config_(crypto_config) {
  QUIC_DVLOG(1) << "TlsServerHandshaker: client_cert_mode initial value: "
                << client_cert_mode();
  // This handshaker only supports TLS 1.3-based QUIC versions.
  QUICHE_DCHECK_EQ(PROTOCOL_TLS1_3,
                   session->connection()->version().handshake_protocol);
  SSL_set_accept_state(ssl());
  // Older QUIC versions use the draft (legacy) transport-parameters TLS
  // extension codepoint instead of the standardized one.
  int use_legacy_extension = 0;
  if (session->version().UsesLegacyTlsExtension()) {
    use_legacy_extension = 1;
  }
  SSL_set_quic_use_legacy_codepoint(ssl(), use_legacy_extension);
  // Only pay for the SSL info callback when someone is tracing.
  if (session->connection()->context()->tracer) {
    tls_connection_.EnableInfoCallback();
  }
#if BORINGSSL_API_VERSION >= 22
  // Honor the operator-configured key-exchange group preference when the
  // linked BoringSSL is new enough to support it.
  if (!crypto_config->preferred_groups().empty()) {
    SSL_set1_group_ids(ssl(), crypto_config->preferred_groups().data(),
                       crypto_config->preferred_groups().size());
  }
#endif
}
// Cancels async proof-source and ticket-decryption work so no callback can
// fire into a destroyed handshaker.
TlsServerHandshaker::~TlsServerHandshaker() { CancelOutstandingCallbacks(); }
// Cancels every pending asynchronous operation: the proof-source handle
// (certificate selection / signing) and any in-flight ticket decryption.
void TlsServerHandshaker::CancelOutstandingCallbacks() {
  if (proof_source_handle_ != nullptr) {
    proof_source_handle_->CloseHandle();
  }
  if (ticket_decryption_callback_ != nullptr) {
    ticket_decryption_callback_->Cancel();
    ticket_decryption_callback_.reset();
  }
}
// Translates BoringSSL info-callback events into human-readable strings on
// the connection tracer. A no-op when no tracer is attached.
void TlsServerHandshaker::InfoCallback(int type, int value) {
  QuicConnectionTracer* tracer =
      session()->connection()->context()->tracer.get();
  if (tracer == nullptr) {
    return;
  }
  // `type` is a bitmask; check the most specific events first.
  if (type & SSL_CB_LOOP) {
    tracer->PrintString(
        absl::StrCat("SSL:ACCEPT_LOOP:", SSL_state_string_long(ssl())));
  } else if (type & SSL_CB_ALERT) {
    const char* prefix =
        (type & SSL_CB_READ) ? "SSL:READ_ALERT:" : "SSL:WRITE_ALERT:";
    tracer->PrintString(absl::StrCat(prefix, SSL_alert_type_string_long(value),
                                     ":", SSL_alert_desc_string_long(value)));
  } else if (type & SSL_CB_EXIT) {
    // `value` is 1 on success for SSL_CB_EXIT events.
    const char* prefix =
        (value == 1) ? "SSL:ACCEPT_EXIT_OK:" : "SSL:ACCEPT_EXIT_FAIL:";
    tracer->PrintString(absl::StrCat(prefix, SSL_state_string_long(ssl())));
  } else if (type & SSL_CB_HANDSHAKE_START) {
    tracer->PrintString(
        absl::StrCat("SSL:HANDSHAKE_START:", SSL_state_string_long(ssl())));
  } else if (type & SSL_CB_HANDSHAKE_DONE) {
    tracer->PrintString(
        absl::StrCat("SSL:HANDSHAKE_DONE:", SSL_state_string_long(ssl())));
  } else {
    QUIC_DLOG(INFO) << "Unknown event type " << type << ": "
                    << SSL_state_string_long(ssl());
    tracer->PrintString(
        absl::StrCat("SSL:unknown:", value, ":", SSL_state_string_long(ssl())));
  }
}
// Creates the default (synchronous) proof-source handle. Subclasses may
// override this to provide asynchronous certificate selection / signing.
std::unique_ptr<ProofSourceHandle>
TlsServerHandshaker::MaybeCreateProofSourceHandle() {
  auto handle = std::make_unique<DefaultProofSourceHandle>(this, proof_source_);
  return handle;
}
// Channel IDs are not supported by the TLS handshaker; always reports none.
bool TlsServerHandshaker::GetBase64SHA256ClientChannelID(
    std::string* ) const {
  return false;
}
// Intentionally a no-op: the TLS handshaker does not send server config
// updates (see NumServerConfigUpdateMessagesSent, which always returns 0).
void TlsServerHandshaker::SendServerConfigUpdate(
    const CachedNetworkParameters* ) {
}
// Turns off session-ticket support for this connection. Returns false when
// disabling is not permitted or the connection is already gone.
bool TlsServerHandshaker::DisableResumption() {
  if (can_disable_resumption_ && session()->connection()->connected()) {
    tls_connection_.DisableTicketSupport();
    return true;
  }
  return false;
}
// True when BoringSSL accepted the client's 0-RTT early data.
bool TlsServerHandshaker::IsZeroRtt() const {
  return SSL_early_data_accepted(ssl()) != 0;
}
// True when the TLS session was resumed rather than fully negotiated.
bool TlsServerHandshaker::IsResumption() const {
  return SSL_session_reused(ssl()) != 0;
}
// Whether the client presented a session ticket (whether or not it was
// accepted).
bool TlsServerHandshaker::ResumptionAttempted() const {
  return ticket_received_;
}
// Whether the client attempted to send 0-RTT data. Only meaningful once
// certificate selection has started, hence the bug-if guard.
bool TlsServerHandshaker::EarlyDataAttempted() const {
  QUIC_BUG_IF(quic_tls_early_data_attempted_too_early,
              !select_cert_status_.has_value())
      << "EarlyDataAttempted must be called after EarlySelectCertCallback is "
         "started";
  return early_data_attempted_;
}
// SendServerConfigUpdate is a no-op for TLS, so nothing is ever sent.
int TlsServerHandshaker::NumServerConfigUpdateMessagesSent() const {
  return 0;
}
// Returns the network parameters carried by the last validated address token,
// or nullptr when none have been received.
const CachedNetworkParameters*
TlsServerHandshaker::PreviousCachedNetworkParams() const {
  return last_received_cached_network_params_.get();
}
// Stores a copy of `cached_network_params` as the "previous" parameters.
// The by-value parameter is moved into the heap allocation instead of being
// copied a second time (clang-tidy: performance-unnecessary-value-param).
void TlsServerHandshaker::SetPreviousCachedNetworkParams(
    CachedNetworkParameters cached_network_params) {
  last_received_cached_network_params_ =
      std::make_unique<CachedNetworkParameters>(
          std::move(cached_network_params));
}
// Once a handshake-level packet decrypts, the client provably owns the
// handshake keys, so initial keys can be discarded.
void TlsServerHandshaker::OnPacketDecrypted(EncryptionLevel level) {
  if (level == ENCRYPTION_HANDSHAKE && state_ < HANDSHAKE_PROCESSED) {
    state_ = HANDSHAKE_PROCESSED;
    handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
    handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_INITIAL);
  }
}
void TlsServerHandshaker::OnHandshakeDoneReceived() {
  // Servers send HANDSHAKE_DONE; they must never receive it.
  QUICHE_DCHECK(false);
}
void TlsServerHandshaker::OnNewTokenReceived(absl::string_view ) {
  // NEW_TOKEN frames flow server-to-client only; receipt here is a bug.
  QUICHE_DCHECK(false);
}
// Mints a fresh source-address token for the peer's effective address,
// optionally embedding `cached_network_params`. No previous tokens are
// chained in.
std::string TlsServerHandshaker::GetAddressToken(
    const CachedNetworkParameters* cached_network_params) const {
  SourceAddressTokens empty_previous_tokens;
  const QuicConnection* connection = session()->connection();
  return crypto_config_->NewSourceAddressToken(
      crypto_config_->source_address_token_boxer(), empty_previous_tokens,
      connection->effective_peer_address().host(),
      connection->random_generator(), connection->clock()->WallNow(),
      cached_network_params);
}
// Parses and validates a source-address token against the peer's current
// address. On success, stashes any network parameters carried in the token.
bool TlsServerHandshaker::ValidateAddressToken(absl::string_view token) const {
  SourceAddressTokens tokens;
  HandshakeFailureReason reason = crypto_config_->ParseSourceAddressToken(
      crypto_config_->source_address_token_boxer(), token, tokens);
  if (reason != HANDSHAKE_OK) {
    QUIC_DLOG(WARNING) << "Failed to parse source address token: "
                       << CryptoUtils::HandshakeFailureReasonToString(reason);
    return false;
  }
  auto cached_network_params = std::make_unique<CachedNetworkParameters>();
  reason = crypto_config_->ValidateSourceAddressTokens(
      tokens, session()->connection()->effective_peer_address().host(),
      session()->connection()->clock()->WallNow(), cached_network_params.get());
  if (reason != HANDSHAKE_OK) {
    QUIC_DLOG(WARNING) << "Failed to validate source address token: "
                       << CryptoUtils::HandshakeFailureReasonToString(reason);
    return false;
  }
  // Keep the parameters (e.g. bandwidth estimates) for later use.
  last_received_cached_network_params_ = std::move(cached_network_params);
  return true;
}
bool TlsServerHandshaker::ShouldSendExpectCTHeader() const {
  // Expect-CT is never advertised by this handshaker.
  return false;
}
bool TlsServerHandshaker::DidCertMatchSni() const {
  // Recorded when the certificate chain was selected.
  return cert_matched_sni_;
}
// Details from the proof source's signature computation; may be nullptr.
const ProofSource::Details* TlsServerHandshaker::ProofSourceDetails() const {
  return proof_source_details_.get();
}
// RFC 5705-style keying-material export, forwarded to the TLS layer.
bool TlsServerHandshaker::ExportKeyingMaterial(absl::string_view label,
                                               absl::string_view context,
                                               size_t result_len,
                                               std::string* result) {
  return ExportKeyingMaterialForLabel(label, context, result_len, result);
}
// Forwards connection-close notifications to the base handshaker, which only
// needs the error code and source.
void TlsServerHandshaker::OnConnectionClosed(
    const QuicConnectionCloseFrame& frame, ConnectionCloseSource source) {
  TlsHandshaker::OnConnectionClosed(frame.quic_error_code, source);
}
// BoringSSL's reason for accepting/rejecting early data; delegated upward.
ssl_early_data_reason_t TlsServerHandshaker::EarlyDataReason() const {
  return TlsHandshaker::EarlyDataReason();
}
// Set once the forward-secure write secret is installed (see SetWriteSecret).
bool TlsServerHandshaker::encryption_established() const {
  return encryption_established_;
}
// 1-RTT keys exist exactly when the handshake has been confirmed.
bool TlsServerHandshaker::one_rtt_keys_available() const {
  return state_ == HANDSHAKE_CONFIRMED;
}
// Negotiated crypto parameters (cipher suite, key-exchange group, SNI, ...).
const QuicCryptoNegotiatedParameters&
TlsServerHandshaker::crypto_negotiated_params() const {
  return *crypto_negotiated_params_;
}
// Parser for inbound CRYPTO frames; provided by the base TLS handshaker.
CryptoMessageParser* TlsServerHandshaker::crypto_message_parser() {
  return TlsHandshaker::crypto_message_parser();
}
// Current position in the handshake state machine.
HandshakeState TlsServerHandshaker::GetHandshakeState() const { return state_; }
// Stores application state to be baked into resumption tickets (consumed by
// SetTransportParameters when building the early-data context).
void TlsServerHandshaker::SetServerApplicationStateForResumption(
    std::unique_ptr<ApplicationState> state) {
  application_state_ = std::move(state);
}
// Per-encryption-level cap on buffered crypto data; delegated to the base.
size_t TlsServerHandshaker::BufferSizeLimitForLevel(
    EncryptionLevel level) const {
  return TlsHandshaker::BufferSizeLimitForLevel(level);
}
// Key-update support: rolls keys forward and returns the new 1-RTT decrypter.
std::unique_ptr<QuicDecrypter>
TlsServerHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
  return TlsHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter();
}
// Key-update support: returns an encrypter for the current 1-RTT key phase.
std::unique_ptr<QuicEncrypter>
TlsServerHandshaker::CreateCurrentOneRttEncrypter() {
  return TlsHandshaker::CreateCurrentOneRttEncrypter();
}
// Hook for subclasses to tweak config defaults; intentionally a no-op here.
void TlsServerHandshaker::OverrideQuicConfigDefaults(QuicConfig* ) {}
// Resumes the handshake after an async operation completes, batching any
// resulting packets under one flusher and notifying the delegate unless the
// handshake itself closed the connection.
void TlsServerHandshaker::AdvanceHandshakeFromCallback() {
  QuicConnection::ScopedPacketFlusher flusher(session()->connection());
  AdvanceHandshake();
  if (!is_connection_closed()) {
    handshaker_delegate()->OnHandshakeCallbackDone();
  }
}
// Extracts, parses, and validates the client's QUIC transport parameters from
// the ClientHello. On failure, fills `error_details` and returns false.
bool TlsServerHandshaker::ProcessTransportParameters(
    const SSL_CLIENT_HELLO* client_hello, std::string* error_details) {
  TransportParameters client_params;
  const uint8_t* client_params_bytes;
  size_t params_bytes_len;
  // The extension codepoint depends on the negotiated QUIC version.
  uint16_t extension_type = TLSEXT_TYPE_quic_transport_parameters_standard;
  if (session()->version().UsesLegacyTlsExtension()) {
    extension_type = TLSEXT_TYPE_quic_transport_parameters_legacy;
  }
  if (!SSL_early_callback_ctx_extension_get(client_hello, extension_type,
                                            &client_params_bytes,
                                            &params_bytes_len)) {
    params_bytes_len = 0;
  }
  if (params_bytes_len == 0) {
    *error_details = "Client's transport parameters are missing";
    return false;
  }
  std::string parse_error_details;
  if (!ParseTransportParameters(session()->connection()->version(),
                                Perspective::IS_CLIENT, client_params_bytes,
                                params_bytes_len, &client_params,
                                &parse_error_details)) {
    QUICHE_DCHECK(!parse_error_details.empty());
    *error_details =
        "Unable to parse client's transport parameters: " + parse_error_details;
    return false;
  }
  // Tell the connection (and any debug visitors) what was received.
  session()->connection()->OnTransportParametersReceived(client_params);
  // Downgrade-protection checks on the version fields.
  if (client_params.legacy_version_information.has_value() &&
      CryptoUtils::ValidateClientHelloVersion(
          client_params.legacy_version_information->version,
          session()->connection()->version(), session()->supported_versions(),
          error_details) != QUIC_NO_ERROR) {
    return false;
  }
  if (client_params.version_information.has_value() &&
      !CryptoUtils::ValidateChosenVersion(
          client_params.version_information->chosen_version,
          session()->version(), error_details)) {
    QUICHE_DCHECK(!error_details->empty());
    return false;
  }
  if (handshaker_delegate()->ProcessTransportParameters(
          client_params, false, error_details) !=
      QUIC_NO_ERROR) {
    return false;
  }
  // Subclass hook for extra (custom) parameters.
  if (!ProcessAdditionalTransportParameters(client_params)) {
    *error_details = "Failed to process additional transport parameters";
    return false;
  }
  return true;
}
// Builds the server's transport parameters, hands them to BoringSSL, and—if
// application state was provided—derives the 0-RTT early-data context.
// `result.success` stays false on any failure.
TlsServerHandshaker::SetTransportParametersResult
TlsServerHandshaker::SetTransportParameters() {
  SetTransportParametersResult result;
  QUICHE_DCHECK(!result.success);
  server_params_.perspective = Perspective::IS_SERVER;
  server_params_.legacy_version_information =
      TransportParameters::LegacyVersionInformation();
  server_params_.legacy_version_information->supported_versions =
      CreateQuicVersionLabelVector(session()->supported_versions());
  server_params_.legacy_version_information->version =
      CreateQuicVersionLabel(session()->connection()->version());
  server_params_.version_information =
      TransportParameters::VersionInformation();
  server_params_.version_information->chosen_version =
      CreateQuicVersionLabel(session()->version());
  server_params_.version_information->other_versions =
      CreateQuicVersionLabelVector(session()->supported_versions());
  if (!handshaker_delegate()->FillTransportParameters(&server_params_)) {
    return result;
  }
  // Notify the connection/debug visitors of what will be sent.
  session()->connection()->OnTransportParametersSent(server_params_);
  {
    std::vector<uint8_t> server_params_bytes;
    if (!SerializeTransportParameters(server_params_, &server_params_bytes) ||
        SSL_set_quic_transport_params(ssl(), server_params_bytes.data(),
                                      server_params_bytes.size()) != 1) {
      return result;
    }
    result.quic_transport_params = std::move(server_params_bytes);
  }
  if (application_state_) {
    std::vector<uint8_t> early_data_context;
    if (!SerializeTransportParametersForTicket(
            server_params_, *application_state_, &early_data_context)) {
      QUIC_BUG(quic_bug_10341_4)
          << "Failed to serialize Transport Parameters for ticket.";
      result.early_data_context = std::vector<uint8_t>();
      return result;
    }
    SSL_set_quic_early_data_context(ssl(), early_data_context.data(),
                                    early_data_context.size());
    result.early_data_context = std::move(early_data_context);
    // The state has been consumed; release it.
    application_state_.reset(nullptr);
  }
  result.success = true;
  return result;
}
// Returns true when `serialized_params` parses and, after degreasing, equals
// the transport parameters this server most recently configured.
bool TlsServerHandshaker::TransportParametersMatch(
    absl::Span<const uint8_t> serialized_params) const {
  TransportParameters parsed;
  std::string error_details;
  if (!ParseTransportParameters(session()->version(), Perspective::IS_SERVER,
                                serialized_params.data(),
                                serialized_params.size(), &parsed,
                                &error_details)) {
    return false;
  }
  DegreaseTransportParameters(parsed);
  return parsed == server_params_;
}
// Installs a new write secret. At the forward-secure level this also records
// the negotiated cipher suite, key-exchange group, and ECH status.
void TlsServerHandshaker::SetWriteSecret(
    EncryptionLevel level, const SSL_CIPHER* cipher,
    absl::Span<const uint8_t> write_secret) {
  if (is_connection_closed()) {
    return;
  }
  if (level == ENCRYPTION_FORWARD_SECURE) {
    encryption_established_ = true;
    const SSL_CIPHER* ssl_cipher = SSL_get_current_cipher(ssl());
    if (ssl_cipher) {
      crypto_negotiated_params_->cipher_suite =
          SSL_CIPHER_get_protocol_id(ssl_cipher);
    }
    crypto_negotiated_params_->key_exchange_group = SSL_get_curve_id(ssl());
    crypto_negotiated_params_->encrypted_client_hello = SSL_ech_accepted(ssl());
  }
  TlsHandshaker::SetWriteSecret(level, cipher, write_secret);
}
// No Accept-CH value is advertised by default; subclasses may override.
std::string TlsServerHandshaker::GetAcceptChValueForHostname(
    const std::string& ) const {
  return std::string();
}
// Whether the client offered the new ALPS codepoint. Only valid after
// certificate selection has started, hence the bug on early access.
bool TlsServerHandshaker::UseAlpsNewCodepoint() const {
  if (!select_cert_status_.has_value()) {
    QUIC_BUG(quic_tls_check_alps_new_codepoint_too_early)
        << "UseAlpsNewCodepoint must be called after "
           "EarlySelectCertCallback is started";
    return false;
  }
  return alps_new_codepoint_received_;
}
// Completes the handshake: requires a known ALPN, marks the handshake
// confirmed, and drops handshake-level keys (1-RTT keys are now in use).
void TlsServerHandshaker::FinishHandshake() {
  QUICHE_DCHECK(!SSL_in_early_data(ssl()));
  if (!valid_alpn_received_) {
    QUIC_DLOG(ERROR)
        << "Server: handshake finished without receiving a known ALPN";
    CloseConnection(QUIC_HANDSHAKE_FAILED,
                    "Server did not receive a known ALPN");
    return;
  }
  ssl_early_data_reason_t reason_code = EarlyDataReason();
  QUIC_DLOG(INFO) << "Server: handshake finished. Early data reason "
                  << reason_code << " ("
                  << CryptoUtils::EarlyDataReasonToString(reason_code) << ")";
  state_ = HANDSHAKE_CONFIRMED;
  handshaker_delegate()->OnTlsHandshakeComplete();
  handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_HANDSHAKE);
  handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_HANDSHAKE);
}
// Default client-certificate verification: accept unconditionally.
// Subclasses that require client certs override this.
QuicAsyncStatus TlsServerHandshaker::VerifyCertChain(
    const std::vector<std::string>& , std::string* ,
    std::unique_ptr<ProofVerifyDetails>* , uint8_t* ,
    std::unique_ptr<ProofVerifierCallback> ) {
  QUIC_DVLOG(1) << "VerifyCertChain returning success";
  return QUIC_SUCCESS;
}
void TlsServerHandshaker::OnProofVerifyDetailsAvailable(
    const ProofVerifyDetails& ) {
  // Verification details are ignored by the default implementation.
}
// BoringSSL private-key-method hook: starts a TLS signature via the proof
// source handle. If the operation goes async, arms the latency timer and
// switches the expected SSL error so the handshake pauses until completion.
ssl_private_key_result_t TlsServerHandshaker::PrivateKeySign(
    uint8_t* out, size_t* out_len, size_t max_out, uint16_t sig_alg,
    absl::string_view in) {
  QUICHE_DCHECK_EQ(expected_ssl_error(), SSL_ERROR_WANT_READ);
  QuicAsyncStatus status = proof_source_handle_->ComputeSignature(
      session()->connection()->self_address(),
      session()->connection()->peer_address(), crypto_negotiated_params_->sni,
      sig_alg, in, max_out);
  if (status == QUIC_PENDING) {
    set_expected_ssl_error(SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
    if (async_op_timer_.has_value()) {
      QUIC_CODE_COUNT(
          quic_tls_server_computing_signature_while_another_op_pending);
    }
    async_op_timer_ = QuicTimeAccumulator();
    async_op_timer_->Start(now());
  }
  // PrivateKeyComplete returns ssl_private_key_retry while pending.
  return PrivateKeyComplete(out, out_len, max_out);
}
// BoringSSL private-key-method completion hook: copies the computed signature
// out (once available), records latency stats, and clears the stored
// signature to free its memory.
ssl_private_key_result_t TlsServerHandshaker::PrivateKeyComplete(
    uint8_t* out, size_t* out_len, size_t max_out) {
  if (expected_ssl_error() == SSL_ERROR_WANT_PRIVATE_KEY_OPERATION) {
    // Still waiting for the async signature; tell BoringSSL to retry later.
    return ssl_private_key_retry;
  }
  const bool success = HasValidSignature(max_out);
  QuicConnectionStats::TlsServerOperationStats compute_signature_stats;
  compute_signature_stats.success = success;
  if (async_op_timer_.has_value()) {
    async_op_timer_->Stop(now());
    compute_signature_stats.async_latency =
        async_op_timer_->GetTotalElapsedTime();
    async_op_timer_.reset();
    RECORD_LATENCY_IN_US("tls_server_async_compute_signature_latency_us",
                         compute_signature_stats.async_latency,
                         "Async compute signature latency in microseconds");
  }
  connection_stats().tls_server_compute_signature_stats =
      std::move(compute_signature_stats);
  if (!success) {
    return ssl_private_key_failure;
  }
  *out_len = cert_verify_sig_.size();
  memcpy(out, cert_verify_sig_.data(), *out_len);
  // The signature is single-use; release its storage eagerly.
  cert_verify_sig_.clear();
  cert_verify_sig_.shrink_to_fit();
  return ssl_private_key_success;
}
// Called by the proof source handle when signing finishes. Stores the
// signature on success and, for async completions, resumes the handshake
// under the connection's context.
void TlsServerHandshaker::OnComputeSignatureDone(
    bool ok, bool is_sync, std::string signature,
    std::unique_ptr<ProofSource::Details> details) {
  QUIC_DVLOG(1) << "OnComputeSignatureDone. ok:" << ok
                << ", is_sync:" << is_sync
                << ", len(signature):" << signature.size();
  std::optional<QuicConnectionContextSwitcher> context_switcher;
  if (!is_sync) {
    context_switcher.emplace(connection_context());
  }
  QUIC_TRACESTRING(absl::StrCat("TLS compute signature done. ok:", ok,
                                ", len(signature):", signature.size()));
  if (ok) {
    cert_verify_sig_ = std::move(signature);
    proof_source_details_ = std::move(details);
  }
  const int last_expected_ssl_error = expected_ssl_error();
  set_expected_ssl_error(SSL_ERROR_WANT_READ);
  if (!is_sync) {
    // An async completion must have been preceded by PrivateKeySign going
    // pending with SSL_ERROR_WANT_PRIVATE_KEY_OPERATION.
    QUICHE_DCHECK_EQ(last_expected_ssl_error,
                     SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
    AdvanceHandshakeFromCallback();
  }
}
// A signature is usable when one was produced and it fits the caller's
// buffer of `max_signature_size` bytes.
bool TlsServerHandshaker::HasValidSignature(size_t max_signature_size) const {
  if (cert_verify_sig_.empty()) {
    return false;
  }
  return cert_verify_sig_.size() <= max_signature_size;
}
// Maximum ciphertext expansion the ticket crypter adds when sealing a ticket.
// Requires a ticket crypter to be configured.
size_t TlsServerHandshaker::SessionTicketMaxOverhead() {
  QUICHE_DCHECK(proof_source_->GetTicketCrypter());
  return proof_source_->GetTicketCrypter()->MaxOverhead();
}
// Encrypts a session ticket for the client. Returns 1 on success, 0 on
// failure (BoringSSL convention). When encryption fails and the flag is on,
// emits an intentionally undecryptable placeholder instead of no ticket.
int TlsServerHandshaker::SessionTicketSeal(uint8_t* out, size_t* out_len,
                                           size_t max_out_len,
                                           absl::string_view in) {
  QUICHE_DCHECK(proof_source_->GetTicketCrypter());
  std::vector<uint8_t> ticket =
      proof_source_->GetTicketCrypter()->Encrypt(in, ticket_encryption_key_);
  if (GetQuicReloadableFlag(
          quic_send_placeholder_ticket_when_encrypt_ticket_fails) &&
      ticket.empty()) {
    QUIC_CODE_COUNT(quic_tls_server_handshaker_send_placeholder_ticket);
    const absl::string_view kTicketFailurePlaceholder = "TICKET FAILURE";
    const absl::string_view kTicketWithSizeLimit =
        kTicketFailurePlaceholder.substr(0, max_out_len);
    ticket.assign(kTicketWithSizeLimit.begin(), kTicketWithSizeLimit.end());
  }
  if (max_out_len < ticket.size()) {
    QUIC_BUG(quic_bug_12423_2)
        << "TicketCrypter returned " << ticket.size()
        << " bytes of ciphertext, which is larger than its max overhead of "
        << max_out_len;
    return 0;  // failure
  }
  *out_len = ticket.size();
  memcpy(out, ticket.data(), ticket.size());
  QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_sealed);
  return 1;  // success
}
ssl_ticket_aead_result_t TlsServerHandshaker::SessionTicketOpen(
uint8_t* out, size_t* out_len, size_t max_out_len, absl::string_view in) {
QUICHE_DCHECK(proof_source_->GetTicketCrypter());
if (ignore_ticket_open_) {
QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_ignored_1);
return ssl_ticket_aead_ignore_ticket;
}
if (!ticket_decryption_callback_) {
ticket_decryption_callback_ = std::make_shared<DecryptCallback>(this);
proof_source_->GetTicketCrypter()->Decrypt(in, ticket_decryption_callback_);
if (ticket_decryption_callback_) {
QUICHE_DCHECK(!ticket_decryption_callback_->IsDone());
set_expected_ssl_error(SSL_ERROR_PENDING_TICKET);
if (async_op_timer_.has_value()) {
QUIC_CODE_COUNT(
quic_tls_server_decrypting_ticket_while_another_op_pending);
}
async_op_timer_ = QuicTimeAccumulator();
async_op_timer_->Start(now());
}
}
if (ticket_decryption_callback_ && !ticket_decryption_callback_->IsDone()) {
return ssl_ticket_aead_retry;
}
ssl_ticket_aead_result_t result =
FinalizeSessionTicketOpen(out, out_len, max_out_len);
QuicConnectionStats::TlsServerOperationStats decrypt_ticket_stats;
decrypt_ticket_stats.success = (result == ssl_ticket_aead_success);
if (async_op_timer_.has_value()) {
async_op_timer_->Stop(now());
decrypt_ticket_stats.async_latency = async_op_timer_->GetTotalElapsedTime();
async_op_timer_.reset();
RECORD_LATENCY_IN_US("tls_server_async_decrypt_ticket_latency_us",
decrypt_ticket_stats.async_latency,
"Async decrypt ticket latency in microseconds");
}
connection_stats().tls_server_decrypt_ticket_stats =
std::move(decrypt_ticket_stats);
return re | #include "quiche/quic/core/tls_server_handshaker.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/certificate_util.h"
#include "quiche/quic/core/crypto/client_proof_source.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_crypto_client_stream.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_client_handshaker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/failing_proof_source.h"
#include "quiche/quic/test_tools/fake_proof_source.h"
#include "quiche/quic/test_tools/fake_proof_source_handle.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_session_cache.h"
#include "quiche/quic/test_tools/test_certificates.h"
#include "quiche/quic/test_tools/test_ticket_crypter.h"
namespace quic {
class QuicConnection;
class QuicStream;
}
using testing::_;
using testing::NiceMock;
using testing::Return;
namespace quic {
namespace test {
namespace {
// Server identity used by every test in this file.
const char kServerHostname[] = "test.example.com";
const uint16_t kServerPort = 443;
// One parameterized-test configuration: a QUIC version crossed with whether
// server-side TLS resumption is disabled.
struct TestParams {
  ParsedQuicVersion version;
  bool disable_resumption;
};
// Used by gtest to produce readable names for parameterized test instances.
std::string PrintToString(const TestParams& p) {
  const char* resumption_suffix =
      p.disable_resumption ? "ResumptionDisabled" : "ResumptionEnabled";
  return absl::StrCat(ParsedQuicVersionToString(p.version), "_",
                      resumption_suffix);
}
// Enumerates every TLS-capable QUIC version, with resumption both enabled
// and disabled (enabled first, matching the {false, true} order).
std::vector<TestParams> GetTestParams() {
  std::vector<TestParams> params;
  for (const auto& version : AllSupportedVersionsWithTls()) {
    params.push_back(TestParams{version, /*disable_resumption=*/false});
    params.push_back(TestParams{version, /*disable_resumption=*/true});
  }
  return params;
}
// Mockable TlsServerHandshaker: lets tests substitute a FakeProofSourceHandle
// and observe client-certificate receipt. By default the mocks delegate to
// the real base-class behavior.
class TestTlsServerHandshaker : public TlsServerHandshaker {
 public:
  // Custom transport parameter that, when present in the client's params,
  // makes ProcessAdditionalTransportParameters fail the handshake.
  static constexpr TransportParameters::TransportParameterId
      kFailHandshakeParam{0xFFEACA};
  TestTlsServerHandshaker(QuicSession* session,
                          const QuicCryptoServerConfig* crypto_config)
      : TlsServerHandshaker(session, crypto_config),
        proof_source_(crypto_config->proof_source()) {
    ON_CALL(*this, MaybeCreateProofSourceHandle())
        .WillByDefault(testing::Invoke(
            this, &TestTlsServerHandshaker::RealMaybeCreateProofSourceHandle));
    ON_CALL(*this, OverrideQuicConfigDefaults(_))
        .WillByDefault(testing::Invoke(
            this, &TestTlsServerHandshaker::RealOverrideQuicConfigDefaults));
  }
  MOCK_METHOD(std::unique_ptr<ProofSourceHandle>, MaybeCreateProofSourceHandle,
              (), (override));
  MOCK_METHOD(void, OverrideQuicConfigDefaults, (QuicConfig * config),
              (override));
  // Arranges for the next MaybeCreateProofSourceHandle call to return a
  // FakeProofSourceHandle with the given sync/async/fail actions.
  void SetupProofSourceHandle(
      FakeProofSourceHandle::Action select_cert_action,
      FakeProofSourceHandle::Action compute_signature_action,
      QuicDelayedSSLConfig dealyed_ssl_config = QuicDelayedSSLConfig()) {
    EXPECT_CALL(*this, MaybeCreateProofSourceHandle())
        .WillOnce(
            testing::Invoke([this, select_cert_action, compute_signature_action,
                             dealyed_ssl_config]() {
              auto handle = std::make_unique<FakeProofSourceHandle>(
                  proof_source_, this, select_cert_action,
                  compute_signature_action, dealyed_ssl_config);
              fake_proof_source_handle_ = handle.get();
              return handle;
            }));
  }
  // Valid only after SetupProofSourceHandle's expectation has fired.
  FakeProofSourceHandle* fake_proof_source_handle() {
    return fake_proof_source_handle_;
  }
  bool received_client_cert() const { return received_client_cert_; }
  using TlsServerHandshaker::AdvanceHandshake;
  using TlsServerHandshaker::expected_ssl_error;

 protected:
  // Records that a client certificate reached verification, then delegates.
  QuicAsyncStatus VerifyCertChain(
      const std::vector<std::string>& certs, std::string* error_details,
      std::unique_ptr<ProofVerifyDetails>* details, uint8_t* out_alert,
      std::unique_ptr<ProofVerifierCallback> callback) override {
    received_client_cert_ = true;
    return TlsServerHandshaker::VerifyCertChain(certs, error_details, details,
                                                out_alert, std::move(callback));
  }
  bool ProcessAdditionalTransportParameters(
      const TransportParameters& params) override {
    return !params.custom_parameters.contains(kFailHandshakeParam);
  }

 private:
  std::unique_ptr<ProofSourceHandle> RealMaybeCreateProofSourceHandle() {
    return TlsServerHandshaker::MaybeCreateProofSourceHandle();
  }
  void RealOverrideQuicConfigDefaults(QuicConfig* config) {
    return TlsServerHandshaker::OverrideQuicConfigDefaults(config);
  }
  // Owned by the handshaker once returned from MaybeCreateProofSourceHandle.
  FakeProofSourceHandle* fake_proof_source_handle_ = nullptr;
  ProofSource* proof_source_ = nullptr;
  bool received_client_cert_ = false;
};
// Server session whose crypto stream is the mockable TestTlsServerHandshaker.
// Only TLS 1.3 versions are supported.
class TlsServerHandshakerTestSession : public TestQuicSpdyServerSession {
 public:
  using TestQuicSpdyServerSession::TestQuicSpdyServerSession;
  std::unique_ptr<QuicCryptoServerStreamBase> CreateQuicCryptoServerStream(
      const QuicCryptoServerConfig* crypto_config,
      QuicCompressedCertsCache* ) override {
    if (connection()->version().handshake_protocol == PROTOCOL_TLS1_3) {
      return std::make_unique<NiceMock<TestTlsServerHandshaker>>(this,
                                                                 crypto_config);
    }
    QUICHE_CHECK(false) << "Unsupported handshake protocol: "
                        << connection()->version().handshake_protocol;
    return nullptr;
  }
};
class TlsServerHandshakerTest : public QuicTestWithParam<TestParams> {
public:
  // Sets the resumption flag per test parameter, then builds the server
  // config, server session, and a fake client.
  TlsServerHandshakerTest()
      : server_compressed_certs_cache_(
            QuicCompressedCertsCache::kQuicCompressedCertsCacheSize),
        server_id_(kServerHostname, kServerPort, false),
        supported_versions_({GetParam().version}) {
    SetQuicFlag(quic_disable_server_tls_resumption,
                GetParam().disable_resumption);
    client_crypto_config_ = std::make_unique<QuicCryptoClientConfig>(
        crypto_test_utils::ProofVerifierForTesting(),
        std::make_unique<test::SimpleSessionCache>());
    InitializeServerConfig();
    InitializeServer();
    InitializeFakeClient();
  }
  // Sessions are torn down before the helpers/alarm factories they depend on.
  ~TlsServerHandshakerTest() override {
    server_session_.reset();
    client_session_.reset();
    helpers_.clear();
    alarm_factories_.clear();
  }
  // Builds a server crypto config backed by a FakeProofSource with a
  // TestTicketCrypter; raw pointers to both are kept for test control.
  void InitializeServerConfig() {
    auto ticket_crypter = std::make_unique<TestTicketCrypter>();
    ticket_crypter_ = ticket_crypter.get();
    auto proof_source = std::make_unique<FakeProofSource>();
    proof_source_ = proof_source.get();
    proof_source_->SetTicketCrypter(std::move(ticket_crypter));
    server_crypto_config_ = std::make_unique<QuicCryptoServerConfig>(
        QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
        std::move(proof_source), KeyExchangeSource::Default());
  }
  // Alternative config whose proof source always fails, for error-path tests.
  void InitializeServerConfigWithFailingProofSource() {
    server_crypto_config_ = std::make_unique<QuicCryptoServerConfig>(
        QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
        std::make_unique<FailingProofSource>(), KeyExchangeSource::Default());
  }
  // Creates the server connection and a TlsServerHandshakerTestSession on it,
  // replacing any previous server session.
  void CreateTlsServerHandshakerTestSession(MockQuicConnectionHelper* helper,
                                            MockAlarmFactory* alarm_factory) {
    server_connection_ = new PacketSavingConnection(
        helper, alarm_factory, Perspective::IS_SERVER,
        ParsedVersionOfIndex(supported_versions_, 0));
    TlsServerHandshakerTestSession* server_session =
        new TlsServerHandshakerTestSession(
            server_connection_, DefaultQuicConfig(), supported_versions_,
            server_crypto_config_.get(), &server_compressed_certs_cache_);
    server_session->set_client_cert_mode(initial_client_cert_mode_);
    server_session->Initialize();
    // Move the clock away from time zero so timestamps are non-trivial.
    server_connection_->AdvanceTime(QuicTime::Delta::FromSeconds(100000));
    QUICHE_CHECK(server_session);
    server_session_.reset(server_session);
  }
  // Like InitializeServer, but uses the mockable session so tests can inject
  // a FakeProofSourceHandle via server_handshaker_.
  void InitializeServerWithFakeProofSourceHandle() {
    helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
    alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
    CreateTlsServerHandshakerTestSession(helpers_.back().get(),
                                         alarm_factories_.back().get());
    server_handshaker_ = static_cast<NiceMock<TestTlsServerHandshaker>*>(
        server_session_->GetMutableCryptoStream());
    EXPECT_CALL(*server_session_->helper(), CanAcceptClientHello(_, _, _, _, _))
        .Times(testing::AnyNumber());
    // Accept whichever ALPN matches the negotiated version.
    EXPECT_CALL(*server_session_, SelectAlpn(_))
        .WillRepeatedly([this](const std::vector<absl::string_view>& alpns) {
          return std::find(
              alpns.cbegin(), alpns.cend(),
              AlpnForVersion(server_session_->connection()->version()));
        });
    crypto_test_utils::SetupCryptoServerConfigForTest(
        server_connection_->clock(), server_connection_->random_generator(),
        server_crypto_config_.get());
  }
// (Re)creates the server connection and session from the current
// server_crypto_config_. Resets server_handshaker_ to nullptr because the
// stock session does not use the fake proof-source handle.
void InitializeServer() {
  TestQuicSpdyServerSession* server_session = nullptr;
  helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
  alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
  CreateServerSessionForTest(
      server_id_, QuicTime::Delta::FromSeconds(100000), supported_versions_,
      helpers_.back().get(), alarm_factories_.back().get(),
      server_crypto_config_.get(), &server_compressed_certs_cache_,
      &server_connection_, &server_session);
  QUICHE_CHECK(server_session);
  server_session_.reset(server_session);
  server_handshaker_ = nullptr;
  EXPECT_CALL(*server_session_->helper(), CanAcceptClientHello(_, _, _, _, _))
      .Times(testing::AnyNumber());
  // Default ALPN selection: pick the ALPN matching the negotiated version.
  EXPECT_CALL(*server_session_, SelectAlpn(_))
      .WillRepeatedly([this](const std::vector<absl::string_view>& alpns) {
        return std::find(
            alpns.cbegin(), alpns.cend(),
            AlpnForVersion(server_session_->connection()->version()));
      });
  crypto_test_utils::SetupCryptoServerConfigForTest(
      server_connection_->clock(), server_connection_->random_generator(),
      server_crypto_config_.get());
}
// Shorthand for the server session's crypto stream.
QuicCryptoServerStreamBase* server_stream() {
  auto* crypto_stream = server_session_->GetMutableCryptoStream();
  return crypto_stream;
}
// Shorthand for the client session's crypto stream.
QuicCryptoClientStream* client_stream() {
  auto* crypto_stream = client_session_->GetMutableCryptoStream();
  return crypto_stream;
}
// (Re)creates the fake client connection/session and resets the per-handshake
// message counters so AdvanceHandshakeWithFakeClient() starts from scratch.
void InitializeFakeClient() {
  TestQuicSpdyClientSession* client_session = nullptr;
  helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
  alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
  CreateClientSessionForTest(
      server_id_, QuicTime::Delta::FromSeconds(100000), supported_versions_,
      helpers_.back().get(), alarm_factories_.back().get(),
      client_crypto_config_.get(), &client_connection_, &client_session);
  // By default the client offers exactly the ALPN for its QUIC version.
  const std::string default_alpn =
      AlpnForVersion(client_connection_->version());
  ON_CALL(*client_session, GetAlpnsToOffer())
      .WillByDefault(Return(std::vector<std::string>({default_alpn})));
  QUICHE_CHECK(client_session);
  client_session_.reset(client_session);
  moved_messages_counts_ = {0, 0};
}
// Pumps handshake messages between client and server until both sides have
// 1-RTT keys. Asserts forward progress each round to avoid an infinite loop
// when the handshake stalls.
void CompleteCryptoHandshake() {
  while (!client_stream()->one_rtt_keys_available() ||
         !server_stream()->one_rtt_keys_available()) {
    auto previous_moved_messages_counts = moved_messages_counts_;
    AdvanceHandshakeWithFakeClient();
    ASSERT_NE(previous_moved_messages_counts, moved_messages_counts_);
  }
}
// Moves one round of handshake messages in each direction. Starts the client
// handshake (CryptoConnect) on the first call, identified by a zero
// client-side moved-message count.
void AdvanceHandshakeWithFakeClient() {
  QUICHE_CHECK(server_connection_);
  QUICHE_CHECK(client_session_ != nullptr);
  // Silence uninteresting-call warnings for proof/write callbacks.
  EXPECT_CALL(*client_session_, OnProofValid(_)).Times(testing::AnyNumber());
  EXPECT_CALL(*client_session_, OnProofVerifyDetailsAvailable(_))
      .Times(testing::AnyNumber());
  EXPECT_CALL(*client_connection_, OnCanWrite()).Times(testing::AnyNumber());
  EXPECT_CALL(*server_connection_, OnCanWrite()).Times(testing::AnyNumber());
  if (moved_messages_counts_.first == 0) {
    client_stream()->CryptoConnect();
  }
  moved_messages_counts_ = crypto_test_utils::AdvanceHandshake(
      client_connection_, client_stream(), moved_messages_counts_.first,
      server_connection_, server_stream(), moved_messages_counts_.second);
}
// Asserts the full set of post-handshake invariants on both endpoints:
// keys and encryption established, terminal handshake states, and matching
// negotiated crypto parameters.
void ExpectHandshakeSuccessful() {
  EXPECT_TRUE(client_stream()->one_rtt_keys_available());
  EXPECT_TRUE(client_stream()->encryption_established());
  EXPECT_TRUE(server_stream()->one_rtt_keys_available());
  EXPECT_TRUE(server_stream()->encryption_established());
  EXPECT_EQ(HANDSHAKE_COMPLETE, client_stream()->GetHandshakeState());
  EXPECT_EQ(HANDSHAKE_CONFIRMED, server_stream()->GetHandshakeState());
  const auto& client_crypto_params =
      client_stream()->crypto_negotiated_params();
  const auto& server_crypto_params =
      server_stream()->crypto_negotiated_params();
  EXPECT_NE(0, client_crypto_params.cipher_suite);
  EXPECT_NE(0, client_crypto_params.key_exchange_group);
  EXPECT_NE(0, client_crypto_params.peer_signature_algorithm);
  EXPECT_EQ(client_crypto_params.cipher_suite,
            server_crypto_params.cipher_suite);
  EXPECT_EQ(client_crypto_params.key_exchange_group,
            server_crypto_params.key_exchange_group);
  // NOTE(review): the server side presumably records no peer signature
  // algorithm because no client certificate was presented — confirm.
  EXPECT_EQ(0, server_crypto_params.peer_signature_algorithm);
}
// Returns the arguments of the most recent SelectCertificate() call observed
// by the fake proof-source handle. Requires
// InitializeServerWithFakeProofSourceHandle() and at least one call.
FakeProofSourceHandle::SelectCertArgs last_select_cert_args() const {
  QUICHE_CHECK(server_handshaker_ &&
               server_handshaker_->fake_proof_source_handle());
  QUICHE_CHECK(!server_handshaker_->fake_proof_source_handle()
                    ->all_select_cert_args()
                    .empty());
  return server_handshaker_->fake_proof_source_handle()
      ->all_select_cert_args()
      .back();
}
// Returns the arguments of the most recent ComputeSignature() call observed
// by the fake proof-source handle. Same preconditions as
// last_select_cert_args().
FakeProofSourceHandle::ComputeSignatureArgs last_compute_signature_args()
    const {
  QUICHE_CHECK(server_handshaker_ &&
               server_handshaker_->fake_proof_source_handle());
  QUICHE_CHECK(!server_handshaker_->fake_proof_source_handle()
                    ->all_compute_signature_args()
                    .empty());
  return server_handshaker_->fake_proof_source_handle()
      ->all_compute_signature_args()
      .back();
}
protected:
// Installs a self-signed client certificate (wildcard server match "*") into
// the client crypto config. Returns false if the cert/key pair could not be
// registered.
bool SetupClientCert() {
  auto client_proof_source = std::make_unique<DefaultClientProofSource>();
  CertificatePrivateKey client_cert_key(
      MakeKeyPairForSelfSignedCertificate());
  CertificateOptions options;
  options.subject = "CN=subject";
  options.serial_number = 0x12345678;
  // Wide validity window so the cert never expires mid-test.
  options.validity_start = {2020, 1, 1, 0, 0, 0};
  options.validity_end = {2049, 12, 31, 0, 0, 0};
  std::string der_cert =
      CreateSelfSignedCertificate(*client_cert_key.private_key(), options);
  quiche::QuicheReferenceCountedPointer<ClientProofSource::Chain>
      client_cert_chain(new ClientProofSource::Chain({der_cert}));
  if (!client_proof_source->AddCertAndKey({"*"}, client_cert_chain,
                                          std::move(client_cert_key))) {
    return false;
  }
  client_crypto_config_->set_proof_source(std::move(client_proof_source));
  return true;
}
// Helpers/alarm factories are kept alive in vectors because connections
// created across re-initializations still reference them.
std::vector<std::unique_ptr<MockQuicConnectionHelper>> helpers_;
std::vector<std::unique_ptr<MockAlarmFactory>> alarm_factories_;
// Server state. Raw connection pointers are owned by the sessions.
PacketSavingConnection* server_connection_;
std::unique_ptr<TestQuicSpdyServerSession> server_session_;
// Non-null only after InitializeServerWithFakeProofSourceHandle().
NiceMock<TestTlsServerHandshaker>* server_handshaker_ = nullptr;
TestTicketCrypter* ticket_crypter_;
FakeProofSource* proof_source_;
std::unique_ptr<QuicCryptoServerConfig> server_crypto_config_;
QuicCompressedCertsCache server_compressed_certs_cache_;
QuicServerId server_id_;
ClientCertMode initial_client_cert_mode_ = ClientCertMode::kNone;
// Client state.
PacketSavingConnection* client_connection_;
std::unique_ptr<QuicCryptoClientConfig> client_crypto_config_;
std::unique_ptr<TestQuicSpdyClientSession> client_session_;
crypto_test_utils::FakeClientOptions client_options_;
// {client, server} counts of handshake messages moved so far.
std::pair<size_t, size_t> moved_messages_counts_ = {0, 0};
ParsedQuicVersionVector supported_versions_;
};
// Runs every TEST_P below once per parameter combination from GetTestParams().
INSTANTIATE_TEST_SUITE_P(TlsServerHandshakerTests, TlsServerHandshakerTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// A fresh server stream reports no encryption or keys before any handshake
// input. NOTE(review): "Conected" is a typo in the test name; renaming would
// change test filters, so it is left as-is.
TEST_P(TlsServerHandshakerTest, NotInitiallyConected) {
  EXPECT_FALSE(server_stream()->encryption_established());
  EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
// Happy-path handshake: completes and negotiates TLS 1.3.
TEST_P(TlsServerHandshakerTest, ConnectedAfterTlsHandshake) {
  CompleteCryptoHandshake();
  EXPECT_EQ(PROTOCOL_TLS1_3, server_stream()->handshake_protocol());
  ExpectHandshakeSuccessful();
}
// Cert selection completes asynchronously; the handshake pauses on the
// pending operation and succeeds once it is completed.
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertSuccess) {
  InitializeServerWithFakeProofSourceHandle();
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  AdvanceHandshakeWithFakeClient();
  ASSERT_TRUE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
}
// Async cert selection fails: the server produces no handshake flight
// (server-side moved-message count stays 0).
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertFailure) {
  InitializeServerWithFakeProofSourceHandle();
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::FAIL_ASYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  AdvanceHandshakeWithFakeClient();
  ASSERT_TRUE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
  EXPECT_EQ(moved_messages_counts_.second, 0u);
}
// Both cert selection and signing are async: the handshaker first waits on
// SSL_ERROR_PENDING_CERTIFICATE, then on
// SSL_ERROR_WANT_PRIVATE_KEY_OPERATION, completing after both resolve.
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertAndSignature) {
  InitializeServerWithFakeProofSourceHandle();
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_ASYNC);
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  AdvanceHandshakeWithFakeClient();
  ASSERT_TRUE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  EXPECT_EQ(server_handshaker_->expected_ssl_error(),
            SSL_ERROR_PENDING_CERTIFICATE);
  server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
  ASSERT_TRUE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  EXPECT_EQ(server_handshaker_->expected_ssl_error(),
            SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
  server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
}
// Signature computed asynchronously through the (activated) FakeProofSource;
// invoking the pending callback lets the handshake finish.
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSignature) {
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  proof_source_->Activate();
  AdvanceHandshakeWithFakeClient();
  ASSERT_EQ(proof_source_->NumPendingCallbacks(), 1);
  proof_source_->InvokePendingCallback(0);
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
}
// Cancelling outstanding callbacks clears the pending async cert-selection;
// completing it afterwards must be a harmless no-op.
TEST_P(TlsServerHandshakerTest, CancelPendingSelectCert) {
  InitializeServerWithFakeProofSourceHandle();
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  AdvanceHandshakeWithFakeClient();
  ASSERT_TRUE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  server_handshaker_->CancelOutstandingCallbacks();
  ASSERT_FALSE(
      server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
  server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
}
// Destroying the session while a signature callback is pending must not
// crash when the callback later fires.
TEST_P(TlsServerHandshakerTest, CancelPendingSignature) {
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  proof_source_->Activate();
  AdvanceHandshakeWithFakeClient();
  ASSERT_EQ(proof_source_->NumPendingCallbacks(), 1);
  server_session_ = nullptr;
  proof_source_->InvokePendingCallback(0);
}
// The server records the SNI sent by the client.
TEST_P(TlsServerHandshakerTest, ExtractSNI) {
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_EQ(server_stream()->crypto_negotiated_params().sni,
            "test.example.com");
}
// The original connection ID is forwarded to the proof source's cert
// selection. Early data is disabled — presumably to keep a single,
// deterministic SelectCert call; confirm if modifying.
TEST_P(TlsServerHandshakerTest, ServerConnectionIdPassedToSelectCert) {
  InitializeServerWithFakeProofSourceHandle();
  server_session_->set_early_data_enabled(false);
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_EQ(last_select_cert_args().original_connection_id, TestConnectionId());
}
// A mixed-case server hostname is normalized to lowercase before being used
// as SNI and handed to cert selection / signature computation.
TEST_P(TlsServerHandshakerTest, HostnameForCertSelectionAndComputeSignature) {
  server_id_ = QuicServerId("tEsT.EXAMPLE.CoM", kServerPort, false);
  InitializeServerWithFakeProofSourceHandle();
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_EQ(server_stream()->crypto_negotiated_params().sni,
            "test.example.com");
  EXPECT_EQ(last_select_cert_args().hostname, "test.example.com");
  EXPECT_EQ(last_compute_signature_args().hostname, "test.example.com");
}
// The session's early-data setting is reflected in the SSL config passed to
// cert selection.
TEST_P(TlsServerHandshakerTest, SSLConfigForCertSelection) {
  InitializeServerWithFakeProofSourceHandle();
  server_session_->set_early_data_enabled(false);
  server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::
          DELEGATE_SYNC);
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_FALSE(last_select_cert_args().ssl_config.early_data_enabled);
}
// Feeding a bogus 4-byte TLS handshake record closes the connection with
// QUIC_HANDSHAKE_FAILED and leaves no keys established.
TEST_P(TlsServerHandshakerTest, ConnectionClosedOnTlsError) {
  EXPECT_CALL(*server_connection_,
              CloseConnection(QUIC_HANDSHAKE_FAILED, _, _, _));
  // Message type 1 with a zero-length body — not a valid ClientHello.
  char bogus_handshake_message[] = {
      1,
      0, 0, 0,
  };
  QuicConnection::ScopedPacketFlusher flusher(server_connection_);
  server_stream()->crypto_message_parser()->ProcessInput(
      absl::string_view(bogus_handshake_message,
                        ABSL_ARRAYSIZE(bogus_handshake_message)),
      ENCRYPTION_INITIAL);
  EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
// A client offering only an unknown ALPN causes the server to close with the
// TLS no_application_protocol alert (crypto error 120).
TEST_P(TlsServerHandshakerTest, ClientSendingBadALPN) {
  const std::string kTestBadClientAlpn = "bad-client-alpn";
  EXPECT_CALL(*client_session_, GetAlpnsToOffer())
      .WillOnce(Return(std::vector<std::string>({kTestBadClientAlpn})));
  EXPECT_CALL(*server_connection_,
              CloseConnection(QUIC_HANDSHAKE_FAILED,
                              static_cast<QuicIetfTransportErrorCodes>(
                                  CRYPTO_ERROR_FIRST + 120),
                              "TLS handshake failure (ENCRYPTION_INITIAL) 120: "
                              "no application protocol",
                              _));
  AdvanceHandshakeWithFakeClient();
  EXPECT_FALSE(client_stream()->one_rtt_keys_available());
  EXPECT_FALSE(client_stream()->encryption_established());
  EXPECT_FALSE(server_stream()->one_rtt_keys_available());
  EXPECT_FALSE(server_stream()->encryption_established());
}
// The server's SelectAlpn sees the client's offers in order and both sides
// are notified of the ALPN it picks.
TEST_P(TlsServerHandshakerTest, CustomALPNNegotiation) {
  EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
  const std::string kTestAlpn = "A Custom ALPN Value";
  const std::vector<std::string> kTestAlpns(
      {"foo", "bar", kTestAlpn, "something else"});
  EXPECT_CALL(*client_session_, GetAlpnsToOffer())
      .WillRepeatedly(Return(kTestAlpns));
  EXPECT_CALL(*server_session_, SelectAlpn(_))
      .WillOnce(
          [kTestAlpn, kTestAlpns](const std::vector<absl::string_view>& alpns) {
            EXPECT_THAT(alpns, testing::ElementsAreArray(kTestAlpns));
            return std::find(alpns.cbegin(), alpns.cend(), kTestAlpn);
          });
  EXPECT_CALL(*client_session_, OnAlpnSelected(absl::string_view(kTestAlpn)));
  EXPECT_CALL(*server_session_, OnAlpnSelected(absl::string_view(kTestAlpn)));
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
}
// Server rejects a syntactically invalid SNI even when the client is forced
// to send it (client-side validation bypassed for the test).
TEST_P(TlsServerHandshakerTest, RejectInvalidSNI) {
  server_id_ = QuicServerId("invalid!.example.com", kServerPort, false);
  InitializeFakeClient();
  static_cast<TlsClientHandshaker*>(
      QuicCryptoClientStreamPeer::GetHandshaker(client_stream()))
      ->AllowInvalidSNIForTests();
  AdvanceHandshakeWithFakeClient();
  EXPECT_FALSE(server_stream()->encryption_established());
  EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
// First handshake is full; the second resumes iff resumption is enabled for
// this parameterization.
TEST_P(TlsServerHandshakerTest, Resumption) {
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_FALSE(client_stream()->IsResumption());
  EXPECT_FALSE(server_stream()->IsResumption());
  EXPECT_FALSE(server_stream()->ResumptionAttempted());
  InitializeServer();
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_NE(client_stream()->IsResumption(), GetParam().disable_resumption);
  EXPECT_NE(server_stream()->IsResumption(), GetParam().disable_resumption);
  EXPECT_NE(server_stream()->ResumptionAttempted(),
            GetParam().disable_resumption);
}
// Second handshake with an async ticket-decryption callback: resumption
// succeeds once the pending callback runs. With resumption disabled, no
// decryption callback is even issued.
TEST_P(TlsServerHandshakerTest, ResumptionWithAsyncDecryptCallback) {
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  ticket_crypter_->SetRunCallbacksAsync(true);
  InitializeServer();
  InitializeFakeClient();
  AdvanceHandshakeWithFakeClient();
  if (GetParam().disable_resumption) {
    ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 0u);
    return;
  }
  ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 1u);
  ticket_crypter_->RunPendingCallback(0);
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_TRUE(client_stream()->IsResumption());
  EXPECT_TRUE(server_stream()->IsResumption());
  EXPECT_TRUE(server_stream()->ResumptionAttempted());
}
// When ticket encryption fails on the first handshake, the second handshake
// may still attempt resumption (placeholder ticket) but must not resume.
TEST_P(TlsServerHandshakerTest, ResumptionWithPlaceholderTicket) {
  InitializeFakeClient();
  ticket_crypter_->set_fail_encrypt(true);
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_FALSE(client_stream()->IsResumption());
  EXPECT_FALSE(server_stream()->IsResumption());
  EXPECT_FALSE(server_stream()->ResumptionAttempted());
  InitializeServer();
  InitializeFakeClient();
  CompleteCryptoHandshake();
  ExpectHandshakeSuccessful();
  EXPECT_FALSE(client_stream()->IsResumption());
  EXPECT_FALSE(server_stream()->IsResumption());
  EXPECT_NE(server_stream()->ResumptionAttempted(),
            GetParam().disable_resumption);
}
TEST_P(TlsServerHandshakerTest, AdvanceHandshakeDuringAsyncDecryptCallback) {
if (GetParam().disable_resumption) {
return;
}
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
ticket_crypter_->SetRunCallbacksAsync(true);
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSou | void TlsServerHandshaker::CancelOutstandingCallbacks() {
if (proof_source_handle_) {
proof_source_handle_->CloseHandle();
}
if (ticket_decryption_callback_) {
ticket_decryption_callback_->Cancel();
ticket_decryption_callback_ = nullptr;
}
} | TEST_P(TlsServerHandshakerTest, CancelPendingSelectCert) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
FakeProofSourceHandle::Action::
DELEGATE_SYNC);
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->CancelOutstandingCallbacks();
ASSERT_FALSE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
}
TEST_P(TlsServerHandshakerTest, CancelPendingSignature) {
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
proof_source_->Activate();
AdvanceHandshakeWithFakeClient();
ASSERT_EQ(proof_source_->NumPendingCallbacks(), 1);
server_session_ = nullptr;
proof_source_->InvokePendingCallback(0);
} |
#include "tsl/platform/mutex.h"
#include <time.h>
#include "nsync_cv.h"
#include "nsync_mu.h"
#include "nsync_mu_wait.h"
#include "nsync_time.h"
namespace tsl {
// The public mutex stores its state in an opaque byte buffer
// (internal::MuData); verify the real nsync mutex fits inside it.
static_assert(sizeof(nsync::nsync_mu) <= sizeof(internal::MuData),
              "tsl::internal::MuData needs to be bigger");
// Reinterpret the opaque storage as the underlying nsync mutex.
static inline nsync::nsync_mu *mu_cast(internal::MuData *mu) {
  return reinterpret_cast<nsync::nsync_mu *>(mu);
}
static inline const nsync::nsync_mu *mu_cast(const internal::MuData *mu) {
  return reinterpret_cast<const nsync::nsync_mu *>(mu);
}
// Thin forwards to nsync: construct and exclusively lock.
mutex::mutex() { nsync::nsync_mu_init(mu_cast(&mu_)); }
void mutex::lock() { nsync::nsync_mu_lock(mu_cast(&mu_)); }
bool mutex::try_lock() { return nsync::nsync_mu_trylock(mu_cast(&mu_)) != 0; };
void mutex::unlock() { nsync::nsync_mu_unlock(mu_cast(&mu_)); }
// Debug assertion that the calling thread holds the lock exclusively.
void mutex::assert_held() const TF_ASSERT_EXCLUSIVE_LOCK() {
  nsync::nsync_mu_assert_held(mu_cast(&mu_));
}
// Reader (shared) acquisition.
void mutex::lock_shared() { nsync::nsync_mu_rlock(mu_cast(&mu_)); }
// Attempts to acquire the lock in shared (reader) mode without blocking.
// Fix: dropped the stray ';' after the function body — it formed an empty
// declaration that triggers -Wextra-semi.
bool mutex::try_lock_shared() {
  return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0;
}
void mutex::unlock_shared() { nsync::nsync_mu_runlock(mu_cast(&mu_)); }
// Debug assertion that the calling thread holds the lock in shared mode.
void mutex::assert_held_shared() const TF_ASSERT_SHARED_LOCK() {
  nsync::nsync_mu_rassert_held(mu_cast(&mu_));
}
static int EvaluateCondition(const void *vcond) {
return static_cast<int>(static_cast<const Condition *>(vcond)->Eval());
}
// Blocks (releasing and reacquiring the lock) until cond evaluates true.
void mutex::Await(const Condition &cond) {
  nsync::nsync_mu_wait(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr);
}
bool mutex::AwaitWithDeadline(const Condition &cond, uint64 abs_deadline_ns) {
time_t seconds = abs_deadline_ns / (1000 * 1000 * 1000);
nsync::nsync_time abs_time = nsync::nsync_time_s_ns(
seconds, abs_deadline_ns - seconds * (1000 * 1000 * 1000));
return nsync::nsync_mu_wait_with_deadline(mu_cast(&mu_), &EvaluateCondition,
&cond, nullptr, abs_time,
nullptr) == 0;
}
// Same opaque-storage trick as the mutex: the condition variable lives in
// internal::CVData, which must be large enough for nsync's nsync_cv.
static_assert(sizeof(nsync::nsync_cv) <= sizeof(internal::CVData),
              "tsl::internal::CVData needs to be bigger");
static inline nsync::nsync_cv *cv_cast(internal::CVData *cv) {
  return reinterpret_cast<nsync::nsync_cv *>(cv);
}
condition_variable::condition_variable() {
  nsync::nsync_cv_init(cv_cast(&cv_));
}
// Blocks on the condition variable, atomically releasing the lock's mutex.
void condition_variable::wait(mutex_lock &lock) {
  nsync::nsync_cv_wait(cv_cast(&cv_), mu_cast(&lock.mutex()->mu_));
}
void condition_variable::notify_one() { nsync::nsync_cv_signal(cv_cast(&cv_)); }
void condition_variable::notify_all() {
  nsync::nsync_cv_broadcast(cv_cast(&cv_));
}
namespace internal {
// Timed wait used by condition_variable::wait_until. nsync returns nonzero
// on deadline expiry, which maps to std::cv_status::timeout.
std::cv_status wait_until_system_clock(
    CVData *cv_data, MuData *mu_data,
    const std::chrono::system_clock::time_point timeout_time) {
  int r = nsync::nsync_cv_wait_with_deadline(cv_cast(cv_data), mu_cast(mu_data),
                                             timeout_time, nullptr);
  return r ? std::cv_status::timeout : std::cv_status::no_timeout;
}
}
} | #include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace {
// Fixture exposing helpers that acquire mu_ and probe whether it can
// currently be (shared-)locked without blocking.
class MutexTest : public ::testing::Test {
 protected:
  // Returns a lock by value to exercise mutex_lock's move behavior.
  mutex_lock GetLock() TF_NO_THREAD_SAFETY_ANALYSIS {
    return mutex_lock{mu_};
  }
  tf_shared_lock GetSharedLock() TF_NO_THREAD_SAFETY_ANALYSIS {
    return tf_shared_lock{mu_};
  }
  // True iff the mutex is currently acquirable exclusively.
  bool test_try_lock() {
    bool test = mu_.try_lock();
    if (test) mu_.unlock();
    return test;
  }
  // True iff the mutex is currently acquirable in shared mode.
  bool test_try_lock_shared() {
    bool test = mu_.try_lock_shared();
    if (test) mu_.unlock_shared();
    return test;
  }
  mutex mu_;
};
// A moved-returned mutex_lock holds the mutex for its scope and releases it
// at scope exit.
TEST_F(MutexTest, MovableMutexLockTest) {
  EXPECT_TRUE(test_try_lock());
  {
    mutex_lock lock = GetLock();
    EXPECT_FALSE(test_try_lock());
    EXPECT_FALSE(test_try_lock_shared());
  }
  EXPECT_TRUE(test_try_lock());
}
// A shared lock blocks exclusive acquisition but allows additional readers.
TEST_F(MutexTest, SharedMutexLockTest) {
  EXPECT_TRUE(test_try_lock());
  {
    tf_shared_lock lock = GetSharedLock();
    EXPECT_FALSE(test_try_lock());
    EXPECT_TRUE(test_try_lock_shared());
  }
  EXPECT_TRUE(test_try_lock());
}
// Predicate-wait: worker threads block until `ready` flips, then each
// increments `count`; the main thread waits for all of them.
TEST(ConditionVariableTest, WaitWithPredicate) {
  constexpr int kNumThreads = 4;
  mutex mu;
  condition_variable cv;
  bool ready = false;
  int count = 0;
  tsl::thread::ThreadPool pool(Env::Default(),
                               "condition_variable_test_wait_with_predicate",
                               kNumThreads);
  for (int i = 0; i < kNumThreads; ++i) {
    pool.Schedule([&mu, &cv, &ready, &count]() {
      mutex_lock lock(mu);
      cv.wait(lock, [&ready] { return ready; });
      ++count;
      cv.notify_one();
    });
  }
  {
    // No worker may pass the predicate before `ready` is set.
    mutex_lock lock(mu);
    EXPECT_EQ(count, 0);
  }
  {
    mutex_lock lock(mu);
    ready = true;
    cv.notify_all();
  }
  {
    mutex_lock lock(mu);
    cv.wait(lock, [&count, kNumThreads] { return count == kNumThreads; });
    EXPECT_EQ(count, kNumThreads);
  }
}
// wait(lock, pred) with an already-true predicate returns immediately and
// leaves the lock held.
TEST(ConditionVariableTest, WaitWithTruePredicateDoesntBlock) {
  mutex mu;
  mutex_lock lock(mu);
  condition_variable cv;
  cv.wait(lock, [] { return true; });
  EXPECT_TRUE(static_cast<bool>(lock));
}
}
} | bool mutex::try_lock_shared() {
return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0;
} | TEST_F(MutexTest, SharedMutexLockTest) {
EXPECT_TRUE(test_try_lock());
{
tf_shared_lock lock = GetSharedLock();
EXPECT_FALSE(test_try_lock());
EXPECT_TRUE(test_try_lock_shared());
}
EXPECT_TRUE(test_try_lock());
} |
#ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class ASBSBatch;
template <typename TaskType>
class ASBSQueue;
}
// Batch scheduler shared across queues that adaptively tunes the number of
// concurrently processed ("in flight") batches to minimize observed batch
// latency. Created via Create(); must be held in a shared_ptr
// (enable_shared_from_this).
template <typename TaskType>
class AdaptiveSharedBatchScheduler
    : public std::enable_shared_from_this<
          AdaptiveSharedBatchScheduler<TaskType>> {
 public:
  ~AdaptiveSharedBatchScheduler() {
    // Only delete the pool if we created it (see constructor).
    if (owned_batch_thread_pool_) {
      delete batch_thread_pool_;
    }
  }
  struct Options {
    string thread_pool_name = {"batch_threads"};
    // Upper bound on concurrently processed batches.
    int64_t num_batch_threads = port::MaxParallelism();
    // If set, used instead of an internally created pool (not owned).
    thread::ThreadPool* thread_pool = nullptr;
    // Lower bound for the adaptive in-flight limit.
    int64_t min_in_flight_batches_limit = 1;
    // Scheduling-priority boost given to nearly-full batches.
    int64_t full_batch_scheduling_boost_micros = 0;
    Env* env = Env::Default();
    // Starting point for the adaptive in-flight limit.
    double initial_in_flight_batches_limit = 3;
    // Window size (in batches) for the latency average the tuner uses.
    int64_t batches_to_average_over = 1000;
    // If true, schedule batches strictly in creation order.
    bool fifo_scheduling = false;
  };
  // Validates options and constructs a scheduler; returns InvalidArgument on
  // bad options.
  static Status Create(
      const Options& options,
      std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler);
  struct QueueOptions {
    int max_batch_size = 1000;
    // If set, oversized input tasks are split via split_input_task_func.
    absl::optional<int> max_input_task_size = absl::nullopt;
    absl::optional<int> max_tasks_per_batch = absl::nullopt;
    int max_enqueued_batches = 10;
    // A batch becomes schedulable this long after creation, even if not full.
    int64_t batch_timeout_micros = 0;
    std::function<Status(std::unique_ptr<TaskType>* input_task, int first_size,
                         int max_batch_size,
                         std::vector<std::unique_ptr<TaskType>>* output_tasks)>
        split_input_task_func;
    bool disable_padding = false;
  };
  using BatchProcessor = std::function<void(std::unique_ptr<Batch<TaskType>>)>;
  // Registers a new queue whose batches are processed by the callback.
  Status AddQueue(const QueueOptions& options,
                  BatchProcessor process_batch_callback,
                  std::unique_ptr<BatchScheduler<TaskType>>* queue);
  double in_flight_batches_limit() {
    mutex_lock l(mu_);
    return in_flight_batches_limit_;
  }

 private:
  friend class internal::ASBSQueue<TaskType>;
  explicit AdaptiveSharedBatchScheduler(const Options& options);
  // Runs a batch callback and records latency for the adaptive tuner.
  void CallbackWrapper(const internal::ASBSBatch<TaskType>* batch,
                       BatchProcessor callback, bool is_express);
  void MaybeScheduleNextBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  void MaybeScheduleNextBatchFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  void MaybeScheduleClosedBatches();
  void MaybeScheduleClosedBatchesLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  void MaybeScheduleClosedBatchesLockedFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  void MaybeAdjustInflightLimit() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  void AddBatch(const internal::ASBSBatch<TaskType>* batch);
  void RemoveQueue(const internal::ASBSQueue<TaskType>* queue);
  Env* GetEnv() const { return options_.env; }
  const Options options_;
  // Unscheduled batches; one of these is used depending on fifo_scheduling.
  std::vector<const internal::ASBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
  std::deque<const internal::ASBSBatch<TaskType>*> fifo_batches_
      TF_GUARDED_BY(mu_);
  std::unordered_map<const internal::ASBSQueue<TaskType>*, BatchProcessor>
      queues_and_callbacks_ TF_GUARDED_BY(mu_);
  mutex mu_;
  thread::ThreadPool* batch_thread_pool_;
  bool owned_batch_thread_pool_ = false;
  // Current adaptive limit; fractional values are resolved probabilistically.
  double in_flight_batches_limit_ TF_GUARDED_BY(mu_);
  int64_t in_flight_batches_ TF_GUARDED_BY(mu_) = 0;
  int64_t in_flight_express_batches_ TF_GUARDED_BY(mu_) = 0;
  std::default_random_engine rand_engine_;
  std::uniform_real_distribution<double> rand_double_;
  // Batches processed within the current averaging window.
  int64_t batch_count_ TF_GUARDED_BY(mu_) = 0;
  struct DelayStats {
    int64_t batch_latency_sum = 0;
    double last_avg_latency_ms = 0;
    bool last_latency_decreased = false;
    // +1/-1: direction in which the limit was last adjusted.
    int step_direction = 1;
  };
  DelayStats batch_delay_stats_ TF_GUARDED_BY(mu_);
  // Bounds on the multiplicative step used when adjusting the limit.
  constexpr static double kMaxStepSizeMultiplier = 0.125;
  constexpr static double kMinStepSizeMultiplier = 0.0078125;
  double step_size_multiplier_ TF_GUARDED_BY(mu_) = kMaxStepSizeMultiplier;
  AdaptiveSharedBatchScheduler(const AdaptiveSharedBatchScheduler&) = delete;
  void operator=(const AdaptiveSharedBatchScheduler&) = delete;
};
namespace internal {
// Queue handed to clients by AddQueue(). Accumulates tasks into the current
// open batch and hands closed batches to the shared scheduler.
template <typename TaskType>
class ASBSQueue : public BatchScheduler<TaskType> {
 public:
  using QueueOptions =
      typename AdaptiveSharedBatchScheduler<TaskType>::QueueOptions;
  ASBSQueue(std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
            const QueueOptions& options);
  ~ASBSQueue() override;
  // Adds a task to the current batch, possibly splitting it per options.
  Status Schedule(std::unique_ptr<TaskType>* task) override;
  size_t NumEnqueuedTasks() const override;
  size_t SchedulingCapacity() const override;
  // Called by the scheduler when a batch from this queue is processed.
  void ReleaseBatch(const ASBSBatch<TaskType>* batch);
  size_t max_task_size() const override { return options_.max_batch_size; }

 private:
  size_t SchedulingCapacityLocked() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  // Generates a unique id linking a batch's traceme to its tasks.
  static uint64 NewTraceMeContextIdForBatch();
  std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler_;
  const QueueOptions options_;
  // Open batch receiving new tasks; null when the last batch was closed.
  ASBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
  int64_t num_enqueued_batches_ TF_GUARDED_BY(mu_) = 0;
  int64_t num_enqueued_tasks_ TF_GUARDED_BY(mu_) = 0;
  mutable mutex mu_;
  ASBSQueue(const ASBSQueue&) = delete;
  void operator=(const ASBSQueue&) = delete;
};
// Batch subclass that remembers its owning queue, creation time, and the
// earliest time it may be scheduled (creation + batch timeout).
template <typename TaskType>
class ASBSBatch : public Batch<TaskType> {
 public:
  ASBSBatch(ASBSQueue<TaskType>* queue, int64_t creation_time_micros,
            int64_t batch_timeout_micros, uint64 traceme_context_id)
      : queue_(queue),
        creation_time_micros_(creation_time_micros),
        schedulable_time_micros_(creation_time_micros + batch_timeout_micros),
        traceme_context_id_(traceme_context_id) {}
  ~ASBSBatch() override {}
  ASBSQueue<TaskType>* queue() const { return queue_; }
  int64_t creation_time_micros() const { return creation_time_micros_; }
  int64_t schedulable_time_micros() const { return schedulable_time_micros_; }
  uint64 traceme_context_id() const { return traceme_context_id_; }

 private:
  ASBSQueue<TaskType>* queue_;
  const int64_t creation_time_micros_;
  const int64_t schedulable_time_micros_;
  const uint64 traceme_context_id_;
  ASBSBatch(const ASBSBatch&) = delete;
  void operator=(const ASBSBatch&) = delete;
};
}
// Out-of-line definitions required for ODR-use of the in-class constexpr
// members prior to C++17 inline variables.
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMaxStepSizeMultiplier;
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMinStepSizeMultiplier;
// Validates `options` and, on success, stores a new scheduler in *scheduler.
// Returns InvalidArgument describing the first violated constraint.
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::Create(
    const Options& options,
    std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler) {
  if (options.num_batch_threads < 1) {
    return errors::InvalidArgument("num_batch_threads must be positive; was ",
                                   options.num_batch_threads);
  }
  if (options.min_in_flight_batches_limit < 1) {
    return errors::InvalidArgument(
        "min_in_flight_batches_limit must be >= 1; was ",
        options.min_in_flight_batches_limit);
  }
  if (options.min_in_flight_batches_limit > options.num_batch_threads) {
    return errors::InvalidArgument(
        "min_in_flight_batches_limit (", options.min_in_flight_batches_limit,
        ") must be <= num_batch_threads (", options.num_batch_threads, ")");
  }
  if (options.full_batch_scheduling_boost_micros < 0) {
    return errors::InvalidArgument(
        "full_batch_scheduling_boost_micros can't be negative; was ",
        options.full_batch_scheduling_boost_micros);
  }
  if (options.initial_in_flight_batches_limit > options.num_batch_threads) {
    return errors::InvalidArgument(
        "initial_in_flight_batches_limit (",
        options.initial_in_flight_batches_limit,
        ") should not be larger than num_batch_threads (",
        options.num_batch_threads, ")");
  }
  if (options.initial_in_flight_batches_limit <
      options.min_in_flight_batches_limit) {
    // Fix: the original message was missing the closing parenthesis and
    // separator after the value, rendering as e.g. "...limit (2must be >=".
    return errors::InvalidArgument("initial_in_flight_batches_limit (",
                                   options.initial_in_flight_batches_limit,
                                   ") must be >= min_in_flight_batches_limit (",
                                   options.min_in_flight_batches_limit, ")");
  }
  if (options.batches_to_average_over < 1) {
    return errors::InvalidArgument(
        "batches_to_average_over should be "
        "greater than or equal to 1; was ",
        options.batches_to_average_over);
  }
  scheduler->reset(new AdaptiveSharedBatchScheduler<TaskType>(options));
  return absl::OkStatus();
}
// Seeds the RNG used for probabilistic fractional in-flight limits and
// either adopts the caller-provided thread pool (not owned) or creates one
// (owned; deleted in the destructor).
template <typename TaskType>
AdaptiveSharedBatchScheduler<TaskType>::AdaptiveSharedBatchScheduler(
    const Options& options)
    : options_(options),
      in_flight_batches_limit_(options.initial_in_flight_batches_limit),
      rand_double_(0.0, 1.0) {
  std::random_device device;
  rand_engine_.seed(device());
  if (options.thread_pool == nullptr) {
    owned_batch_thread_pool_ = true;
    batch_thread_pool_ = new thread::ThreadPool(
        GetEnv(), options.thread_pool_name, options.num_batch_threads);
  } else {
    owned_batch_thread_pool_ = false;
    batch_thread_pool_ = options.thread_pool;
  }
}
// Creates a new queue bound to this scheduler and stores it in `*queue`.
// `process_batch_callback` is invoked (possibly concurrently) for each batch
// the scheduler dispatches from the new queue.  The queue holds a shared_ptr
// back to this scheduler, keeping it alive for the queue's lifetime.
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::AddQueue(
    const QueueOptions& options, BatchProcessor process_batch_callback,
    std::unique_ptr<BatchScheduler<TaskType>>* queue) {
  if (options.max_batch_size <= 0) {
    return errors::InvalidArgument("max_batch_size must be positive; was ",
                                   options.max_batch_size);
  }
  if (options.max_enqueued_batches <= 0) {
    return errors::InvalidArgument(
        "max_enqueued_batches must be positive; was ",
        options.max_enqueued_batches);
  }
  if (options.max_input_task_size.has_value()) {
    if (options.max_input_task_size.value() < options.max_batch_size) {
      // Fixed: the two adjacent string literals previously concatenated to
      // "...max_batch_size;got ..." (missing space after the semicolon).
      return errors::InvalidArgument(
          "max_input_task_size must be larger than or equal to max_batch_size;"
          " got max_input_task_size as ",
          options.max_input_task_size.value(), " and max_batch_size as ",
          options.max_batch_size);
    }
  }
  // The raw pointer is retained only as a key into queues_and_callbacks_;
  // ownership transfers to the caller through `queue`.
  internal::ASBSQueue<TaskType>* asbs_queue_raw;
  queue->reset(asbs_queue_raw = new internal::ASBSQueue<TaskType>(
                   this->shared_from_this(), options));
  mutex_lock l(mu_);
  queues_and_callbacks_[asbs_queue_raw] = process_batch_callback;
  return absl::OkStatus();
}
// Registers `batch` for scheduling.  Called by ASBSQueue when a new batch is
// created; the queue retains ownership until ReleaseBatch() at dispatch time.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::AddBatch(
    const internal::ASBSBatch<TaskType>* batch) {
  mutex_lock l(mu_);
  // FIFO mode preserves arrival order; otherwise batches go into the pool
  // that MaybeScheduleNextBatch() picks from by score.
  if (options_.fifo_scheduling) {
    fifo_batches_.push_back(batch);
  } else {
    batches_.push_back(batch);
  }
  int64_t delay_micros =
      batch->schedulable_time_micros() - GetEnv()->NowMicros();
  if (delay_micros <= 0) {
    // Already eligible: try to dispatch immediately while holding mu_.
    MaybeScheduleNextBatch();
    return;
  }
  // Not yet eligible: re-attempt scheduling once the batch becomes
  // schedulable.  The closure captures shared_from_this() so the scheduler
  // cannot be destroyed before the deferred callback runs.
  GetEnv()->SchedClosureAfter(
      delay_micros, [this, lifetime_preserver = this->shared_from_this()] {
        mutex_lock l(mu_);
        MaybeScheduleNextBatch();
      });
}
// Unregisters `queue` (called from the queue's destructor).  After this the
// scheduler will no longer dispatch batches for the queue.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::RemoveQueue(
    const internal::ASBSQueue<TaskType>* queue) {
  mutex_lock l(mu_);
  queues_and_callbacks_.erase(queue);
}
// Dispatches the batch at the head of the FIFO queue if it is schedulable.
// Precondition (guaranteed by MaybeScheduleNextBatch): mu_ is held and
// fifo_batches_ is non-empty.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatchFIFO() {
  const internal::ASBSBatch<TaskType>* batch = *fifo_batches_.begin();
  // Head not yet eligible; later batches must not jump ahead in FIFO mode.
  if (batch->schedulable_time_micros() > GetEnv()->NowMicros()) {
    return;
  }
  fifo_batches_.pop_front();
  // Hand ownership from the queue to the processing callback.
  batch->queue()->ReleaseBatch(batch);
  batch_thread_pool_->Schedule(std::bind(
      &AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this, batch,
      queues_and_callbacks_[batch->queue()], false ));
  in_flight_batches_++;
}
// Dispatches closed batches from the head of the FIFO queue onto idle
// threads as "express" batches.  Stops at the first open batch so FIFO
// ordering is preserved.  Requires mu_ held.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
    TaskType>::MaybeScheduleClosedBatchesLockedFIFO() {
  // Threads not already processing regular or express batches.
  int free_threads =
      static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
                       in_flight_express_batches_);
  while (free_threads > 0 && !fifo_batches_.empty() &&
         fifo_batches_.front()->IsClosed()) {
    const internal::ASBSBatch<TaskType>* head = fifo_batches_.front();
    fifo_batches_.pop_front();
    head->queue()->ReleaseBatch(head);
    batch_thread_pool_->Schedule(
        std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
                  this, head, queues_and_callbacks_[head->queue()], true));
    ++in_flight_express_batches_;
    --free_threads;
  }
}
// Attempts to dispatch one batch, respecting the (fractional)
// in_flight_batches_limit_.  Requires mu_ held.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatch() {
  bool batch_empty =
      options_.fifo_scheduling ? fifo_batches_.empty() : batches_.empty();
  if (batch_empty || in_flight_batches_ >= in_flight_batches_limit_) return;
  // The limit is a double; when less than one whole slot remains, schedule
  // probabilistically so the fractional limit is honored in expectation.
  if (in_flight_batches_limit_ - in_flight_batches_ < 1 &&
      rand_double_(rand_engine_) >
          in_flight_batches_limit_ - in_flight_batches_) {
    return;
  }
  if (options_.fifo_scheduling) {
    MaybeScheduleNextBatchFIFO();
    return;
  }
  // Non-FIFO: pick the schedulable batch with the lowest score.  The score
  // is the creation time minus a boost proportional to how full the batch
  // is, so older and fuller batches win.
  auto best_it = batches_.end();
  double best_score = (std::numeric_limits<double>::max)();
  int64_t now_micros = GetEnv()->NowMicros();
  for (auto it = batches_.begin(); it != batches_.end(); it++) {
    if ((*it)->schedulable_time_micros() > now_micros) continue;
    const double score =
        (*it)->creation_time_micros() -
        options_.full_batch_scheduling_boost_micros * (*it)->size() /
            static_cast<double>((*it)->queue()->max_task_size());
    if (best_it == batches_.end() || score < best_score) {
      best_score = score;
      best_it = it;
    }
  }
  // No batch is schedulable yet.
  if (best_it == batches_.end()) return;
  const internal::ASBSBatch<TaskType>* batch = *best_it;
  batches_.erase(best_it);
  // Hand ownership from the queue to the processing callback.
  batch->queue()->ReleaseBatch(batch);
  batch_thread_pool_->Schedule(
      std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this,
                batch, queues_and_callbacks_[batch->queue()], false));
  in_flight_batches_++;
}
// Public-facing variant: acquires mu_ and delegates to the Locked version.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleClosedBatches() {
  mutex_lock l(mu_);
  MaybeScheduleClosedBatchesLocked();
}
// Eagerly dispatches closed batches onto any idle threads as "express"
// batches (these bypass the adaptive in-flight limit).  Requires mu_ held.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
    TaskType>::MaybeScheduleClosedBatchesLocked() {
  if (options_.fifo_scheduling) {
    MaybeScheduleClosedBatchesLockedFIFO();
    return;
  }
  // Threads not already processing regular or express batches.
  int free_threads =
      static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
                       in_flight_express_batches_);
  auto it = batches_.begin();
  while (it != batches_.end() && free_threads > 0) {
    if (!(*it)->IsClosed()) {
      ++it;
      continue;
    }
    const internal::ASBSBatch<TaskType>* closed = *it;
    it = batches_.erase(it);
    closed->queue()->ReleaseBatch(closed);
    batch_thread_pool_->Schedule(
        std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
                  this, closed, queues_and_callbacks_[closed->queue()], true));
    ++in_flight_express_batches_;
    --free_threads;
  }
}
// Runs on a batch thread: invokes the queue's processing callback for
// `batch`, then updates scheduler bookkeeping and tries to dispatch more
// work.  Takes ownership of `batch` (freed via the unique_ptr passed to the
// callback).
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper(
    const internal::ASBSBatch<TaskType>* batch,
    AdaptiveSharedBatchScheduler<TaskType>::BatchProcessor callback,
    bool is_express) {
  tsl::profiler::TraceMeConsumer trace_me(
      [&] {
        return profiler::TraceMeEncode(
            "ProcessBatch", {{"batch_size_before_padding", batch->size()},
                             {"_r", 2} });
      },
      tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler,
      batch->traceme_context_id());
  // Latency is measured from batch creation through callback completion.
  const int64_t start_time = batch->creation_time_micros();
  callback(std::unique_ptr<Batch<TaskType>>(
      const_cast<internal::ASBSBatch<TaskType>*>(batch)));
  int64_t end_time = GetEnv()->NowMicros();
  mutex_lock l(mu_);
  if (is_express) {
    // Express batches do not feed the latency statistics.
    in_flight_express_batches_--;
    MaybeScheduleClosedBatchesLocked();
    return;
  }
  in_flight_batches_--;
  batch_count_++;
  batch_delay_stats_.batch_latency_sum += end_time - start_time;
  // Periodically re-tune the in-flight limit from observed latency.
  MaybeAdjustInflightLimit();
  MaybeScheduleNextBatch();
}
// Adaptive tuning of in_flight_batches_limit_: every batches_to_average_over
// batches, compare the window's average latency against the previous window
// and step the limit up or down, growing or shrinking the step size
// depending on whether the last step helped.  Requires mu_ held.
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeAdjustInflightLimit() {
  if (batch_count_ == options_.batches_to_average_over) {
    double current_avg_latency_ms =
        (batch_delay_stats_.batch_latency_sum / 1000.) / batch_count_;
    bool current_latency_decreased =
        current_avg_latency_ms < batch_delay_stats_.last_avg_latency_ms;
    if (current_latency_decreased) {
      // Latency improved: accelerate if the previous step also improved it,
      // otherwise damp, keeping the multiplier within its bounds.
      // (std::clamp replaces the previous min/max pair.)
      step_size_multiplier_ *=
          (batch_delay_stats_.last_latency_decreased ? 2 : 0.5);
      step_size_multiplier_ =
          std::clamp(step_size_multiplier_, kMinStepSizeMultiplier,
                     kMaxStepSizeMultiplier);
    } else {
      // Latency regressed: reverse the direction of adjustment.
      batch_delay_stats_.step_direction = -batch_delay_stats_.step_direction;
    }
    in_flight_batches_limit_ += batch_delay_stats_.step_direction *
                                in_flight_batches_limit_ *
                                step_size_multiplier_;
    // Keep the limit within [min_in_flight_batches_limit, num_batch_threads]
    // (Create() guarantees min <= num_batch_threads, so the bounds are
    // ordered as std::clamp requires).
    in_flight_batches_limit_ =
        std::clamp(in_flight_batches_limit_,
                   static_cast<double>(options_.min_in_flight_batches_limit),
                   static_cast<double>(options_.num_batch_threads));
    batch_delay_stats_.last_avg_latency_ms = current_avg_latency_ms;
    batch_delay_stats_.last_latency_decreased = current_latency_decreased;
    // Reset the window.
    batch_count_ = 0;
    batch_delay_stats_.batch_latency_sum = 0;
  }
}
namespace internal {
// Constructs a queue bound to `scheduler`; the shared_ptr keeps the
// scheduler alive for this queue's lifetime.
template <typename TaskType>
ASBSQueue<TaskType>::ASBSQueue(
    std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
    const QueueOptions& options)
    : scheduler_(scheduler), options_(options) {}
// Blocks until every batch created by this queue has been handed off for
// processing, then unregisters from the scheduler.  The scheduler may still
// reference enqueued batches until they are released, so destruction must
// wait for num_enqueued_batches_ to drain to zero.
template <typename TaskType>
ASBSQueue<TaskType>::~ASBSQueue() {
  const int kSleepMicros = 1000;
  while (true) {
    {
      mutex_lock l(mu_);
      if (num_enqueued_batches_ == 0) {
        break;
      }
    }
    // Poll: batches drain asynchronously on scheduler threads.
    scheduler_->GetEnv()->SleepForMicroseconds(kSleepMicros);
  }
  scheduler_->RemoveQueue(this);
}
template <typename TaskType>
Status ASBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
size_t size = (*task)->size();
if (options_.split_input_task_func == nullptr &&
size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
" is larger than maximum batch size ",
options_.max_batch_size);
}
if (options_.max_input_task_size.has_value() &&
(size > options_.max_input_task_size.value())) {
return errors::InvalidArgument("Task size ", size,
" is larger than max input task size ",
options_.max_input_task_size.value());
}
std::vector<std::unique_ptr<TaskType>> tasks_to_schedule;
std::vector<ASBSBatch<TaskType>*> new_batches;
bool closed_batch = false;
{
mutex_lock l(mu_);
if (size > SchedulingCapacityLocked()) {
return errors::Unavailable("The batch scheduling queue is full");
}
int remaining_batch_size =
current_batch_ == nullptr
? options_.max_batch_size
: options_.max_batch_size - current_batch_->size();
if (options_.split_input_task_func == nullptr ||
size <= remaining_batch_size) {
tasks_to_schedule.push_back(std::move(*task));
} else {
TF_RETURN_IF_ERROR(options_.split_input_task_func(
task, remaining_batch_size, options_.max_batch_size,
&tasks_to_schedule));
}
for (auto& task : tasks_to_schedule) {
if (current_batch_ &&
current_batch_->size() + task->size() > options_.max_batch_size) {
current_batch_->Close();
closed_batch = true;
current_batch_ = nullptr;
}
if (!current_batch_) {
num_enqueued_batches_++;
current_batch_ = new ASBSBatch<TaskType>(
this, scheduler_->GetEnv()->NowMicros(),
options_.batch_timeout_micros, NewTraceMeContextIdForBatch());
new_batches.push_back(cu | #include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace anonymous {
// Minimal BatchTask used to drive the scheduler in tests: it carries only a
// mutable "size" and performs no work.
class FakeTask : public BatchTask {
 public:
  explicit FakeTask(size_t size) : size_(size) {}
  ~FakeTask() override = default;
  // Units this task contributes toward a batch's size.
  size_t size() const override { return size_; }
  void set_size(size_t size) { size_ = size; }
 private:
  size_t size_;
  // Non-copyable: tasks are owned uniquely by batches/schedulers.
  FakeTask(const FakeTask&) = delete;
  void operator=(const FakeTask&) = delete;
};
// Creates a FakeTask of `task_size` and submits it to `scheduler`.  On
// success the scheduler consumes the task (pointer becomes null); the
// CHECK enforces that success and consumption always coincide.
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
  auto task = std::make_unique<FakeTask>(task_size);
  const Status status = scheduler->Schedule(&task);
  CHECK_EQ(status.ok(), task == nullptr);
  return status;
}
// Starts a helper thread that, once `start` is notified, repeatedly advances
// the fake clock by 10us (sleeping 10 real us between steps) until `stop` is
// notified.  Used to unblock time-dependent scheduler logic during teardown.
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
    test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
  return std::unique_ptr<Thread>(Env::Default()->StartThread(
      {}, "FakeClockAdvancerThread", [env, start, stop] {
        start->WaitForNotification();
        while (!stop->HasBeenNotified()) {
          env->AdvanceByMicroseconds(10);
          Env::Default()->SleepForMicroseconds(10);
        }
      }));
}
// Verifies that Create() rejects each invalid Options combination.
TEST(AdaptiveSharedBatchSchedulerTest, BadOptions) {
  using Scheduler = AdaptiveSharedBatchScheduler<FakeTask>;
  std::shared_ptr<Scheduler> scheduler;
  Scheduler::Options options;
  // num_batch_threads must be >= 1.
  options.num_batch_threads = 0;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // initial limit below the (default) minimum in-flight limit.
  options.initial_in_flight_batches_limit = 0.5;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // initial limit may not exceed num_batch_threads.
  options.num_batch_threads = 5;
  options.initial_in_flight_batches_limit = 8;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // batches_to_average_over must be >= 1.
  options.batches_to_average_over = -5;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // min_in_flight_batches_limit must be >= 1.
  options.min_in_flight_batches_limit = 0;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // min limit may not exceed num_batch_threads.
  options.min_in_flight_batches_limit = 5;
  options.num_batch_threads = 3;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
  options = Scheduler::Options();
  // initial limit must be >= min limit.
  options.initial_in_flight_batches_limit = 1;
  options.min_in_flight_batches_limit = 2;
  options.num_batch_threads = 3;
  EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
}
// Verifies that at most initial_in_flight_batches_limit (2) batches are
// processed concurrently: the third batch must not start until the second
// has notified completion.
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimit) {
  AdaptiveSharedBatchScheduler<FakeTask>::Options options;
  options.initial_in_flight_batches_limit = 2;
  // Large window so the limit is never re-tuned during the test.
  options.batches_to_average_over = 1000;
  mutex mu;
  int processed_batches = 0;
  Notification finish_processing;
  auto queue_callback = [&mu, &processed_batches, &finish_processing](
                            std::unique_ptr<Batch<FakeTask>> batch) {
    ASSERT_TRUE(batch->IsClosed());
    EXPECT_GT(batch->num_tasks(), 0);
    mu.lock();
    int batch_num = ++processed_batches;
    mu.unlock();
    if (batch_num == 2) {
      // Give the scheduler a chance to (incorrectly) start batch 3 early.
      Env::Default()->SleepForMicroseconds(1000);
      finish_processing.Notify();
    }
    if (batch_num == 3) {
      // Batch 3 may only start after batch 2 released its slot.
      ASSERT_TRUE(finish_processing.HasBeenNotified());
    }
    finish_processing.WaitForNotification();
  };
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(
      AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
  // Spin until each task is absorbed into a batch before adding the next,
  // so each task lands in its own batch.
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  while (queue->NumEnqueuedTasks() > 0) {
  }
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  while (queue->NumEnqueuedTasks() > 0) {
  }
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
// Exercises the adaptive tuning loop: each batch's callback advances the
// fake clock by a chosen amount (the observed "latency"), and the test
// checks the in-flight limit moves in the expected direction after each
// window (batches_to_average_over = 1, so every batch re-tunes).
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimitTuning) {
  test_util::FakeClockEnv env(Env::Default());
  Notification start_teardown, stop_teardown;
  std::unique_ptr<Thread> teardown_thread =
      CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
  {
    AdaptiveSharedBatchScheduler<FakeTask>::Options options;
    options.env = &env;
    options.initial_in_flight_batches_limit = 2;
    options.batches_to_average_over = 1;
    // The task size encodes which step of the test this batch belongs to;
    // the clock advance is that batch's simulated latency.
    auto queue_callback = [&env](std::unique_ptr<Batch<FakeTask>> batch) {
      ASSERT_TRUE(batch->IsClosed());
      switch (batch->size()) {
        case 0:
          env.AdvanceByMicroseconds(10);
          break;
        case 1:
          env.AdvanceByMicroseconds(15);
          break;
        case 2:
          env.AdvanceByMicroseconds(10);
          break;
        case 3:
          env.AdvanceByMicroseconds(11);
          break;
      }
    };
    std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
    TF_ASSERT_OK(
        AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
    std::unique_ptr<BatchScheduler<FakeTask>> queue;
    TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
    TF_ASSERT_OK(ScheduleTask(0, queue.get()));
    double in_flight_batches_limit = 2;
    // First window: initial step direction decreases the limit.
    while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
    }
    EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
    in_flight_batches_limit = scheduler->in_flight_batches_limit();
    // Latency worsened (15 > 10): direction reverses, limit increases.
    TF_ASSERT_OK(ScheduleTask(1, queue.get()));
    while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
    }
    EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
    in_flight_batches_limit = scheduler->in_flight_batches_limit();
    // Latency improved (10 < 15): keep increasing.
    TF_ASSERT_OK(ScheduleTask(2, queue.get()));
    while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
    }
    EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
    in_flight_batches_limit = scheduler->in_flight_batches_limit();
    // Latency worsened (11 > 10): direction reverses again, limit decreases.
    TF_ASSERT_OK(ScheduleTask(3, queue.get()));
    while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
    }
    EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
    start_teardown.Notify();
  }
  stop_teardown.Notify();
}
// Verifies the scoring in MaybeScheduleNextBatch(): fuller batches get a
// boost proportional to size/max_batch_size, so dispatch order differs from
// creation order.
TEST(AdaptiveSharedBatchSchedulerTest, FullBatchSchedulingBoostMicros) {
  test_util::FakeClockEnv env(Env::Default());
  Notification start_teardown, stop_teardown;
  std::unique_ptr<Thread> teardown_thread =
      CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
  {
    AdaptiveSharedBatchScheduler<FakeTask>::Options options;
    options.env = &env;
    // One thread / limit one so batches are processed strictly one at a time.
    options.initial_in_flight_batches_limit = 1;
    options.num_batch_threads = 1;
    options.batches_to_average_over = 1000;
    options.full_batch_scheduling_boost_micros = 100;
    mutex mu;
    int processed_batches = 0;
    Notification finish_processing;
    // Asserts the dispatch order implied by score = creation_time - boost.
    auto queue_callback = [&mu, &processed_batches, &finish_processing](
                              std::unique_ptr<Batch<FakeTask>> batch) {
      ASSERT_TRUE(batch->IsClosed());
      finish_processing.WaitForNotification();
      mutex_lock l(mu);
      processed_batches++;
      switch (processed_batches) {
        case 1:
          EXPECT_EQ(100, batch->size());
          break;
        case 2:
          EXPECT_EQ(50, batch->size());
          break;
        case 3:
          EXPECT_EQ(900, batch->size());
          break;
        case 4:
          EXPECT_EQ(200, batch->size());
          break;
        default:
          EXPECT_TRUE(false) << "Should only have 4 batches";
      }
    };
    std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
    TF_ASSERT_OK(
        AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
    AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
    std::unique_ptr<BatchScheduler<FakeTask>> queue1;
    std::unique_ptr<BatchScheduler<FakeTask>> queue2;
    // Two queues with different max sizes, so equal task sizes produce
    // different fullness fractions (and hence different boosts).
    queue_options.max_batch_size = 1000;
    TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
    queue_options.max_batch_size = 100;
    TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    while (queue1->NumEnqueuedTasks() > 0) {
    }
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    env.AdvanceByMicroseconds(10);
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    env.AdvanceByMicroseconds(10);
    TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
    env.AdvanceByMicroseconds(45);
    TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
    finish_processing.Notify();
    start_teardown.Notify();
  }
  stop_teardown.Notify();
}
// Same setup as FullBatchSchedulingBoostMicros but with fifo_scheduling on:
// batches must be processed strictly in creation order, regardless of size.
TEST(AdaptiveSharedBatchSchedulerTest, FIFO) {
  test_util::FakeClockEnv env(Env::Default());
  Notification start_teardown, stop_teardown;
  std::unique_ptr<Thread> teardown_thread =
      CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
  {
    AdaptiveSharedBatchScheduler<FakeTask>::Options options;
    options.env = &env;
    options.initial_in_flight_batches_limit = 1;
    options.num_batch_threads = 1;
    options.batches_to_average_over = 1000;
    options.full_batch_scheduling_boost_micros = 0;
    options.fifo_scheduling = true;
    mutex mu;
    int processed_batches = 0;
    Notification finish_processing;
    // Asserts creation-order dispatch.
    auto queue_callback = [&mu, &processed_batches, &finish_processing](
                              std::unique_ptr<Batch<FakeTask>> batch) {
      ASSERT_TRUE(batch->IsClosed());
      finish_processing.WaitForNotification();
      mutex_lock l(mu);
      processed_batches++;
      switch (processed_batches) {
        case 1:
          EXPECT_EQ(100, batch->size());
          break;
        case 2:
          EXPECT_EQ(200, batch->size());
          break;
        case 3:
          EXPECT_EQ(50, batch->size());
          break;
        case 4:
          EXPECT_EQ(900, batch->size());
          break;
        default:
          EXPECT_TRUE(false) << "Should only have 4 batches";
      }
    };
    std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
    TF_ASSERT_OK(
        AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
    AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
    std::unique_ptr<BatchScheduler<FakeTask>> queue1;
    std::unique_ptr<BatchScheduler<FakeTask>> queue2;
    queue_options.max_batch_size = 1000;
    TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
    queue_options.max_batch_size = 100;
    TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    env.AdvanceByMicroseconds(30);
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    env.AdvanceByMicroseconds(10);
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    env.AdvanceByMicroseconds(10);
    TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
    env.AdvanceByMicroseconds(45);
    TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
    finish_processing.Notify();
    start_teardown.Notify();
  }
  stop_teardown.Notify();
}
// Verifies that destroying the queue blocks until its in-flight batches are
// processed: the cleanup check runs after `queue` goes out of scope and
// requires both batches to have completed.
TEST(AdaptiveSharedBatchSchedulerTest, DeleteQueue) {
  AdaptiveSharedBatchScheduler<FakeTask>::Options options;
  options.initial_in_flight_batches_limit = 1;
  options.num_batch_threads = 1;
  options.batches_to_average_over = 1000;
  mutex mu;
  int processed_batches = 0;
  Notification finish_processing;
  auto queue_callback = [&mu, &processed_batches, &finish_processing](
                            std::unique_ptr<Batch<FakeTask>> batch) {
    ASSERT_TRUE(batch->IsClosed());
    EXPECT_GT(batch->num_tasks(), 0);
    finish_processing.WaitForNotification();
    mu.lock();
    processed_batches++;
    mu.unlock();
  };
  // Runs at scope exit, after the queue's destructor has waited for drain.
  auto processed_checker = gtl::MakeCleanup([&mu, &processed_batches] {
    mutex_lock l(mu);
    EXPECT_EQ(processed_batches, 2);
  });
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(
      AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  while (queue->NumEnqueuedTasks() > 0) {
  }
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  // Unblock processing asynchronously so the queue destructor is the one
  // doing the waiting.
  Env::Default()->SchedClosureAfter(
      1000, [&finish_processing] { finish_processing.Notify(); });
}
// Verifies NumEnqueuedTasks() and SchedulingCapacity() bookkeeping as tasks
// fill the current batch and spill into new ones.
TEST(AdaptiveSharedBatchSchedulerTest, QueueCapacityInfo) {
  AdaptiveSharedBatchScheduler<FakeTask>::Options options;
  options.initial_in_flight_batches_limit = 1;
  options.batches_to_average_over = 1000;
  mutex mu;
  int processed_batches = 0;
  Notification finish_processing;
  auto queue_callback = [&mu, &processed_batches, &finish_processing](
                            std::unique_ptr<Batch<FakeTask>> batch) {
    ASSERT_TRUE(batch->IsClosed());
    EXPECT_GT(batch->num_tasks(), 0);
    mu.lock();
    int batch_num = ++processed_batches;
    mu.unlock();
    if (batch_num == 1) {
      // Hold the only processing slot so later tasks stay enqueued.
      finish_processing.WaitForNotification();
    }
  };
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(
      AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  while (queue->NumEnqueuedTasks() > 0) {
  }
  // Defaults presumably allow 10 batches of size 1000; capacity counts the
  // remaining room in the open batch plus the unopened batches.
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
  EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 900);
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  TF_ASSERT_OK(ScheduleTask(200, queue.get()));
  EXPECT_EQ(queue->NumEnqueuedTasks(), 3);
  EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 600);
  // 700 doesn't fit in the open batch (400 used): closes it and opens a new
  // batch, so only the new task remains counted as enqueued.
  TF_ASSERT_OK(ScheduleTask(700, queue.get()));
  EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
  EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 300);
  finish_processing.Notify();
}
// A completely full batch must be processed even though the batch timeout is
// effectively infinite (the task exactly fills max_batch_size).
TEST(AdaptiveSharedBatchSchedulerTest, FullBatches) {
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
  auto queue_callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
    ASSERT_TRUE(batch->IsClosed());
  };
  AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
  queue_options.max_batch_size = 100;
  // Effectively never time out; only fullness can close the batch.
  queue_options.batch_timeout_micros = 1000000000000;
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
// Verifies that an oversized task is split via split_input_task_func: a 350
// task arriving when 70 units remain is split into 70 + 100 + 100 + 80,
// yielding 4 processed batches in total.
TEST(AdaptiveSharedBatchSchedulerTest, TruncateBatches) {
  mutex mu;
  int processed_batches = 0;
  auto queue_callback =
      [&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
        ASSERT_TRUE(batch->IsClosed());
        mutex_lock l(mu);
        ++processed_batches;
      };
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
  queue_options.max_batch_size = 100;
  queue_options.batch_timeout_micros = 1000000;
  // Splits the input into a first chunk that fills the open batch, then
  // max_size chunks for the remainder.
  queue_options.split_input_task_func =
      [](std::unique_ptr<FakeTask>* input_task, int first_size, int max_size,
         std::vector<std::unique_ptr<FakeTask>>* output_tasks) {
        EXPECT_EQ(first_size, 70);
        output_tasks->push_back(std::move(*input_task));
        int remaining_size = output_tasks->back()->size() - first_size;
        output_tasks->back()->set_size(first_size);
        while (remaining_size > 0) {
          int task_size = std::min(remaining_size, max_size);
          output_tasks->emplace_back(new FakeTask(task_size));
          remaining_size -= task_size;
        }
        return absl::OkStatus();
      };
  TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(30, queue.get()));
  TF_ASSERT_OK(ScheduleTask(350, queue.get()));
  // Spin until all four resulting batches have been processed.
  while (true) {
    mutex_lock l(mu);
    if (processed_batches == 4) break;
  }
}
// Verifies that a batch closes once it contains max_tasks_per_batch tasks,
// even though its size (20) is well under max_batch_size (100).
TEST(AdaptiveSharedBatchSchedulerTest, MaxTasksPerBatch) {
  mutex mu;
  int processed_batches = 0;
  auto queue_callback =
      [&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
        ASSERT_TRUE(batch->IsClosed());
        mutex_lock l(mu);
        ++processed_batches;
      };
  std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
  TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
  std::unique_ptr<BatchScheduler<FakeTask>> queue;
  AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
  queue_options.max_batch_size = 100;
  queue_options.batch_timeout_micros = 1000000;
  queue_options.max_tasks_per_batch = 2;
  TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
  // Second task hits the 2-task cap: batch closes, enqueued count resets.
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  EXPECT_EQ(queue->NumEnqueuedTasks(), 0);
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  // Six tasks at two per batch -> three batches.
  while (true) {
    mutex_lock l(mu);
    if (processed_batches == 3) break;
  }
}
}
}
} | void MaybeScheduleNextBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// FIFO variant of the dispatcher; requires mu_ held.
void MaybeScheduleNextBatchFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Acquires mu_ itself, then delegates to the Locked variant.
void MaybeScheduleClosedBatches();
void MaybeScheduleClosedBatchesLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleClosedBatchesLockedFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Re-tunes in_flight_batches_limit_ from recent latency; requires mu_ held.
void MaybeAdjustInflightLimit() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Called by ASBSQueue to hand a new batch to the scheduler.
void AddBatch(const internal::ASBSBatch<TaskType>* batch);
// Called from ASBSQueue's destructor after its batches have drained.
void RemoveQueue(const internal::ASBSQueue<TaskType>* queue);
Env* GetEnv() const { return options_.env; } | TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimitTuning) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1;
auto queue_callback = [&env](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
switch (batch->size()) {
case 0:
env.AdvanceByMicroseconds(10);
break;
case 1:
env.AdvanceByMicroseconds(15);
break;
case 2:
env.AdvanceByMicroseconds(10);
break;
case 3:
env.AdvanceByMicroseconds(11);
break;
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(0, queue.get()));
double in_flight_batches_limit = 2;
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
start_teardown.Notify();
}
stop_teardown.Notify();
} |
#include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
// Applies cluster scoping to a single graph: attaches kXlaInternalScopeAttr
// values around pipeline Stage/Unstage ops so XLA auto-clustering keeps
// distinct pipeline stages in separate clusters.
class ClusterScopingPassImpl {
 public:
  ClusterScopingPassImpl(Graph* graph,
                         OptimizerOptions::GlobalJitLevel global_jit_level)
      : graph_(graph),
        global_jit_level_(global_jit_level),
        unique_scope_id_(0) {}
  Status Run();
 private:
  Status ScopingForPipelineStages();
  // Returns a fresh id so each traversal gets a distinct scope suffix.
  size_t GetUniqueScopeId() { return unique_scope_id_++; }
  // Tag `start` and all nodes it transitively depends on.
  void AddScopeToAllTransitivePredecessors(Node* start);
  // Tag `start` and all nodes transitively reachable from it.
  void AddScopeToAllTransitiveSuccessors(Node* start);
 private:
  Graph* graph_;
  OptimizerOptions::GlobalJitLevel global_jit_level_;
  size_t unique_scope_id_;
};
// Returns the node's kXlaInternalScopeAttr value, or nullopt if the
// attribute is absent.
std::optional<string> GetXlaInternalScope(Node* node) {
  string scope;
  const bool has_scope =
      GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok();
  if (!has_scope) {
    return std::nullopt;
  }
  return scope;
}
// Overwrites the node's kXlaInternalScopeAttr with `scope`.
void SetXlaInternalScope(Node* node, StringPiece scope) {
  node->AddAttr(kXlaInternalScopeAttr, scope);
}
// Sets the node's internal scope to `suffix`, or appends "&suffix" when a
// scope already exists so nodes shared between stages retain every scope.
void AddOrAppendXlaInternalScope(Node* node, absl::string_view suffix) {
  const std::optional<string> existing = GetXlaInternalScope(node);
  const string updated =
      existing.has_value() ? absl::StrCat(existing.value(), "&", suffix)
                           : std::string(suffix);
  SetXlaInternalScope(node, updated);
}
// Tags `start` and every node it transitively depends on with a fresh,
// unique scope suffix via a reverse DFS over the graph.
void ClusterScopingPassImpl::AddScopeToAllTransitivePredecessors(Node* start) {
  const string scope_suffix = absl::StrCat("_", GetUniqueScopeId());
  std::vector<Node*> roots{start};
  auto tag_node = [&scope_suffix](Node* n) {
    AddOrAppendXlaInternalScope(n, scope_suffix);
  };
  ReverseDFSFrom(*graph_, roots, tag_node, nullptr,
                 NodeComparatorName());
}
// Tags `start` and every node transitively reachable from it with a fresh,
// unique scope suffix via a forward DFS over the graph.
void ClusterScopingPassImpl::AddScopeToAllTransitiveSuccessors(Node* start) {
  const string scope_suffix = absl::StrCat("_", GetUniqueScopeId());
  std::vector<Node*> roots{start};
  auto tag_node = [&scope_suffix](Node* n) {
    AddOrAppendXlaInternalScope(n, scope_suffix);
  };
  DFSFrom(*graph_, roots, tag_node, nullptr,
          NodeComparatorName(),
          nullptr);
}
// Stage/Unstage ops mark pipeline boundaries.  Scope the consumer side of
// each Unstage (its transitive successors) and the producer side of each
// Stage (its transitive predecessors) so auto-clustering cannot fuse
// adjacent pipeline stages into one cluster.
Status ClusterScopingPassImpl::ScopingForPipelineStages() {
  for (Node* node : graph_->nodes()) {
    DCHECK(node);
    const string& op_type = node->type_string();
    if (op_type == "Unstage") {
      AddScopeToAllTransitiveSuccessors(node);
    } else if (op_type == "Stage") {
      AddScopeToAllTransitivePredecessors(node);
    }
  }
  return absl::OkStatus();
}
// Runs the pass; a no-op when global auto-clustering is disabled.
Status ClusterScopingPassImpl::Run() {
  if (global_jit_level_ != OptimizerOptions::OFF) {
    return ScopingForPipelineStages();
  }
  return absl::OkStatus();
}
}
// Graph-optimization-pass entry point: runs cluster scoping over the graph,
// using the global jit level resolved from `options`.
Status ClusterScopingPass::Run(const GraphOptimizationPassOptions& options) {
  Graph* graph = options.graph->get();
  return ClusterScopingPassImpl{graph, GetGlobalJitLevelForGraph(options)}
      .Run();
}
} | #include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
Status ClusterScoping(std::unique_ptr<Graph>* graph) {
  // Runs ClusterScopingPass over `graph` with global jit forced to ON_2.
  FixupSourceAndSinkEdges(graph->get());
  GraphOptimizationPassWrapper wrapper;
  auto* optimizer_options =
      wrapper.session_options.config.mutable_graph_options()
          ->mutable_optimizer_options();
  optimizer_options->set_global_jit_level(OptimizerOptions::ON_2);
  ClusterScopingPass pass;
  return pass.Run(wrapper.CreateGraphOptimizationPassOptions(graph));
}
absl::flat_hash_map<string, string> GetXlaInternalScopes(const Graph& graph) {
  // Collects node name -> _XlaInternalScope attr for every node that has one.
  absl::flat_hash_map<string, string> result;
  for (Node* node : graph.nodes()) {
    string scope_value;
    if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope_value).ok()) {
      result[node->name()] = scope_value;
    }
  }
  if (VLOG_IS_ON(2)) {
    VLOG(2) << "_XlaInternalScopes:";
    for (const auto& [name, scope] : result) {
      VLOG(2) << " " << name << " -> " << scope;
    }
  }
  return result;
}
// Adds a "Stage" node named `name` with the given dtypes/inputs to `builder`.
// Returns nullptr if the builder options already carry an error.
Node* BuildStageNode(GraphDefBuilder& builder, string name,
                     std::initializer_list<DataType> dtypes,
                     absl::Span<const ops::NodeOut> values) {
  // BUG FIX: the original moved `name` into WithName(std::move(name)) and
  // then read `name` again for the NodeBuilder below — a use-after-move that
  // passes a moved-from (valid but unspecified) string as the node name.
  // Pass `name` by copy instead so it stays valid for both uses.
  auto opts = builder.opts().WithName(name).WithAttr("dtypes", dtypes);
  if (opts.HaveError()) {
    return nullptr;
  }
  NodeBuilder node_builder(name, "Stage", opts.op_registry());
  node_builder.Input(values);
  return opts.FinalizeBuilder(&node_builder);
}
// The two sides of a Stage/Unstage pipeline must land in different scopes
// (so they cannot be fused), while nodes on the same side share a scope.
TEST(XlaCompilationTest, StagePipelinePreserved) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
    // a -> add0 -> relu0 -> stage        (producer side)
    // unstage -> add1 -> relu1          (consumer side)
    Node* a = ops::SourceOp("Const", builder.opts()
                                         .WithName("a")
                                         .WithAttr("dtype", DT_FLOAT)
                                         .WithAttr("value", Tensor()));
    Node* b = ops::SourceOp("Const", builder.opts()
                                         .WithName("b")
                                         .WithAttr("dtype", DT_FLOAT)
                                         .WithAttr("value", Tensor()));
    Node* unstage = ops::SourceOp(
        "Unstage",
        builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
    Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0"));
    Node* add1 =
        ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
    Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
    ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
    BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
    TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  }
  TF_ASSERT_OK(ClusterScoping(&graph));
  auto scopes = GetXlaInternalScopes(*graph);
  // Producer and consumer sides get distinct scopes; each side is uniform.
  EXPECT_NE(scopes["add0"], scopes["add1"]);
  EXPECT_EQ(scopes["add0"], scopes["relu0"]);
  EXPECT_EQ(scopes["add1"], scopes["relu1"]);
}
// Same pipeline shape as above, but nodes carry user-supplied
// _XlaInternalScope attrs; the pass must still separate the two pipeline
// sides AND keep pre-existing scope distinctions (the appended suffixes
// must not collapse ClusterA/ClusterB/ClusterD into one scope).
TEST(XlaCompilationTest, StagePipelinePreservedAndInitialScopesRespected) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
    Node* a = ops::SourceOp("Const", builder.opts()
                                         .WithName("a")
                                         .WithAttr("dtype", DT_FLOAT)
                                         .WithAttr("value", Tensor()));
    Node* b = ops::SourceOp("Const", builder.opts()
                                         .WithName("b")
                                         .WithAttr("dtype", DT_FLOAT)
                                         .WithAttr("value", Tensor()));
    Node* unstage = ops::SourceOp(
        "Unstage",
        builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
    // add0 and add1 start in the SAME user scope (ClusterA) but sit on
    // opposite sides of the pipeline.
    Node* add0 = ops::BinaryOp("Add", a, b,
                               builder.opts().WithName("add0").WithAttr(
                                   kXlaInternalScopeAttr, "ClusterA"));
    Node* add1 = ops::BinaryOp("Add", unstage, b,
                               builder.opts().WithName("add1").WithAttr(
                                   kXlaInternalScopeAttr, "ClusterA"));
    Node* relu0 = ops::UnaryOp("Relu", add0,
                               builder.opts().WithName("relu0").WithAttr(
                                   kXlaInternalScopeAttr, "ClusterB"));
    ops::UnaryOp("Relu", add1,
                 builder.opts().WithName("relu1").WithAttr(
                     kXlaInternalScopeAttr, "ClusterD"));
    BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
    TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  }
  TF_ASSERT_OK(ClusterScoping(&graph));
  auto scopes = GetXlaInternalScopes(*graph);
  EXPECT_NE(scopes["add0"], scopes["add1"]);
  EXPECT_NE(scopes["add0"], scopes["relu0"]);
  EXPECT_NE(scopes["add1"], scopes["relu1"]);
}
}
} | Status ClusterScopingPassImpl::ScopingForPipelineStages() {
for (Node* n : graph_->nodes()) {
DCHECK(n);
if (n->type_string() == "Unstage") {
AddScopeToAllTransitiveSuccessors(n);
}
if (n->type_string() == "Stage") {
AddScopeToAllTransitivePredecessors(n);
}
}
return absl::OkStatus();
} | TEST(XlaCompilationTest, StagePipelinePreserved) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0"));
Node* add1 =
ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_EQ(scopes["add0"], scopes["relu0"]);
EXPECT_EQ(scopes["add1"], scopes["relu1"]);
}
TEST(XlaCompilationTest, StagePipelinePreservedAndInitialScopesRespected) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b,
builder.opts().WithName("add0").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* add1 = ops::BinaryOp("Add", unstage, b,
builder.opts().WithName("add1").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* relu0 = ops::UnaryOp("Relu", add0,
builder.opts().WithName("relu0").WithAttr(
kXlaInternalScopeAttr, "ClusterB"));
ops::UnaryOp("Relu", add1,
builder.opts().WithName("relu1").WithAttr(
kXlaInternalScopeAttr, "ClusterD"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_NE(scopes["add0"], scopes["relu0"]);
EXPECT_NE(scopes["add1"], scopes["relu1"]);
} |
#include "tensorflow/core/util/bad_indices_policy.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
// Accepted spellings of the bad-indices-policy attribute value.
constexpr char kDefault[] = "DEFAULT";
constexpr char kErrorStr[] = "ERROR";
constexpr char kIgnoreStr[] = "IGNORE";
absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
    absl::string_view str) {
  // An unset (empty) attribute and the explicit "DEFAULT" spelling both map
  // to the default policy.
  if (str.empty() || str == kDefault) return BadIndicesPolicy::kDefault;
  if (str == kErrorStr) return BadIndicesPolicy::kError;
  if (str == kIgnoreStr) return BadIndicesPolicy::kIgnore;
  // Anything else is rejected with the offending spelling in the message.
  return absl::InvalidArgumentError(
      absl::StrCat("Unknown bad indices handling attribute: ", str));
}
} | #include "tensorflow/core/util/bad_indices_policy.h"
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kDefault = "DEFAULT";
constexpr absl::string_view kErrorStr = "ERROR";
constexpr absl::string_view kIgnoreStr = "IGNORE";
class BadIndicesPolicyFromStringTest : public ::testing::Test {
 protected:
  // Parses `input` and verifies it maps to `expected` without error.
  void TestValidInput(absl::string_view input, BadIndicesPolicy expected) {
    absl::StatusOr<BadIndicesPolicy> policy = BadIndicesPolicyFromString(input);
    ASSERT_TRUE(policy.ok());
    EXPECT_EQ(policy.value(), expected);
  }
};
// Valid spellings (including the empty string) map to their enum values.
TEST_F(BadIndicesPolicyFromStringTest, EmptyString) {
  TestValidInput("", BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, DefaultKeyword) {
  TestValidInput(kDefault, BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, ErrorKeyword) {
  TestValidInput(kErrorStr, BadIndicesPolicy::kError);
}
TEST_F(BadIndicesPolicyFromStringTest, IgnoreKeyword) {
  TestValidInput(kIgnoreStr, BadIndicesPolicy::kIgnore);
}
// Any other spelling is rejected with an InvalidArgument status.
TEST_F(BadIndicesPolicyFromStringTest, InvalidInput) {
  absl::StatusOr<BadIndicesPolicy> result =
      BadIndicesPolicyFromString("unknown");
  ASSERT_FALSE(result.ok());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr("Unknown bad indices handling attribute"));
}
}
} | absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
absl::string_view str) {
if (str.empty()) return BadIndicesPolicy::kDefault;
if (str == kDefault) return BadIndicesPolicy::kDefault;
if (str == kErrorStr) return BadIndicesPolicy::kError;
if (str == kIgnoreStr) return BadIndicesPolicy::kIgnore;
return absl::InvalidArgumentError(
absl::StrCat("Unknown bad indices handling attribute: ", str));
} | TEST_F(BadIndicesPolicyFromStringTest, EmptyString) {
TestValidInput("", BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, DefaultKeyword) {
TestValidInput(kDefault, BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, ErrorKeyword) {
TestValidInput(kErrorStr, BadIndicesPolicy::kError);
}
TEST_F(BadIndicesPolicyFromStringTest, IgnoreKeyword) {
TestValidInput(kIgnoreStr, BadIndicesPolicy::kIgnore);
}
TEST_F(BadIndicesPolicyFromStringTest, InvalidInput) {
absl::StatusOr<BadIndicesPolicy> result =
BadIndicesPolicyFromString("unknown");
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("Unknown bad indices handling attribute"));
} |
#include "quiche/quic/core/crypto/null_decrypter.h"
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
// The null decrypter performs no real decryption: it only verifies the
// FNV-1a hash appended to the plaintext (see DecryptPacket/ComputeHash).
// `perspective` selects which peer's hash label is expected.
NullDecrypter::NullDecrypter(Perspective perspective)
    : perspective_(perspective) {}
// No key material is used, so key/nonce/IV setters succeed only when the
// caller passes empty inputs.
bool NullDecrypter::SetKey(absl::string_view key) { return key.empty(); }
bool NullDecrypter::SetNoncePrefix(absl::string_view nonce_prefix) {
  return nonce_prefix.empty();
}
bool NullDecrypter::SetIV(absl::string_view iv) { return iv.empty(); }
bool NullDecrypter::SetHeaderProtectionKey(absl::string_view key) {
  return key.empty();
}
// Preliminary keys and diversification nonces are never expected on a
// NullDecrypter; reaching these is flagged as a bug.
bool NullDecrypter::SetPreliminaryKey(absl::string_view ) {
  QUIC_BUG(quic_bug_10652_1) << "Should not be called";
  return false;
}
bool NullDecrypter::SetDiversificationNonce(
    const DiversificationNonce& ) {
  QUIC_BUG(quic_bug_10652_2) << "Should not be called";
  return true;
}
// "Decrypts" a null-encrypted packet: reads the 96-bit FNV-1a hash prefix,
// recomputes the hash over (associated_data, remaining payload) and, on a
// match, copies the payload into `output`.  Returns false on a short read,
// a too-small output buffer, or a hash mismatch.
bool NullDecrypter::DecryptPacket(uint64_t ,
                                  absl::string_view associated_data,
                                  absl::string_view ciphertext, char* output,
                                  size_t* output_length,
                                  size_t max_output_length) {
  QuicDataReader reader(ciphertext.data(), ciphertext.length(),
                        quiche::HOST_BYTE_ORDER);
  absl::uint128 hash;
  if (!ReadHash(&reader, &hash)) {
    return false;
  }
  absl::string_view plaintext = reader.ReadRemainingPayload();
  if (plaintext.length() > max_output_length) {
    QUIC_BUG(quic_bug_10652_3)
        << "Output buffer must be larger than the plaintext.";
    return false;
  }
  if (hash != ComputeHash(associated_data, plaintext)) {
    return false;
  }
  // Fix: qualify as std::memcpy and include <cstring> explicitly — the
  // original called unqualified memcpy while relying on a transitive include.
  std::memcpy(output, plaintext.data(), plaintext.length());
  *output_length = plaintext.length();
  return true;
}
// Header protection is a no-op here: the mask is always five zero bytes,
// so XOR-ing it leaves the header unchanged.
std::string NullDecrypter::GenerateHeaderProtectionMask(
    QuicDataReader* ) {
  return std::string(5, 0);
}
// No key material exists, hence zero sizes and empty views.
size_t NullDecrypter::GetKeySize() const { return 0; }
size_t NullDecrypter::GetNoncePrefixSize() const { return 0; }
size_t NullDecrypter::GetIVSize() const { return 0; }
absl::string_view NullDecrypter::GetKey() const { return absl::string_view(); }
absl::string_view NullDecrypter::GetNoncePrefix() const {
  return absl::string_view();
}
uint32_t NullDecrypter::cipher_id() const { return 0; }
// No AEAD integrity limit applies; report the maximum representable count.
QuicPacketCount NullDecrypter::GetIntegrityLimit() const {
  return std::numeric_limits<QuicPacketCount>::max();
}
bool NullDecrypter::ReadHash(QuicDataReader* reader, absl::uint128* hash) {
  // The hash is serialized as a 64-bit low part followed by a 32-bit high
  // part; fail if either read comes up short.
  uint64_t low64;
  uint32_t high32;
  if (!reader->ReadUInt64(&low64)) {
    return false;
  }
  if (!reader->ReadUInt32(&high32)) {
    return false;
  }
  *hash = absl::MakeUint128(high32, low64);
  return true;
}
absl::uint128 NullDecrypter::ComputeHash(const absl::string_view data1,
                                         const absl::string_view data2) const {
  // A client verifies data hashed with the "Server" label and vice versa.
  const char* label =
      (perspective_ == Perspective::IS_CLIENT) ? "Server" : "Client";
  absl::uint128 hash = QuicUtils::FNV1a_128_Hash_Three(data1, data2, label);
  // Keep only the low 96 bits of the 128-bit hash.
  absl::uint128 high_bits = absl::MakeUint128(UINT64_C(0x0), UINT64_C(0xffffffff));
  high_bits <<= 96;
  return hash & ~high_bits;
}
} | #include "quiche/quic/core/crypto/null_decrypter.h"
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class NullDecrypterTest : public QuicTestWithParam<bool> {};
// Each fixture packet is a 12-byte truncated hash followed by the plaintext
// "goodbye!".  A server-side decrypter accepts client-labeled packets.
TEST_F(NullDecrypterTest, DecryptClient) {
  unsigned char expected[] = {
      0x97,
      0xdc,
      0x27,
      0x2f,
      0x18,
      0xa8,
      0x56,
      0x73,
      0xdf,
      0x8d,
      0x1d,
      0xd0,
      'g',
      'o',
      'o',
      'd',
      'b',
      'y',
      'e',
      '!',
  };
  const char* data = reinterpret_cast<const char*>(expected);
  size_t len = ABSL_ARRAYSIZE(expected);
  NullDecrypter decrypter(Perspective::IS_SERVER);
  char buffer[256];
  size_t length = 0;
  ASSERT_TRUE(decrypter.DecryptPacket(
      0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
  EXPECT_LT(0u, length);
  EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
// A client-side decrypter accepts server-labeled packets.
TEST_F(NullDecrypterTest, DecryptServer) {
  unsigned char expected[] = {
      0x63,
      0x5e,
      0x08,
      0x03,
      0x32,
      0x80,
      0x8f,
      0x73,
      0xdf,
      0x8d,
      0x1d,
      0x1a,
      'g',
      'o',
      'o',
      'd',
      'b',
      'y',
      'e',
      '!',
  };
  const char* data = reinterpret_cast<const char*>(expected);
  size_t len = ABSL_ARRAYSIZE(expected);
  NullDecrypter decrypter(Perspective::IS_CLIENT);
  char buffer[256];
  size_t length = 0;
  ASSERT_TRUE(decrypter.DecryptPacket(
      0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
  EXPECT_LT(0u, length);
  EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
// A packet whose hash prefix does not match the payload must be rejected.
TEST_F(NullDecrypterTest, BadHash) {
  unsigned char expected[] = {
      0x46,
      0x11,
      0xea,
      0x5f,
      0xcf,
      0x1d,
      0x66,
      0x5b,
      0xba,
      0xf0,
      0xbc,
      0xfd,
      'g',
      'o',
      'o',
      'd',
      'b',
      'y',
      'e',
      '!',
  };
  const char* data = reinterpret_cast<const char*>(expected);
  size_t len = ABSL_ARRAYSIZE(expected);
  NullDecrypter decrypter(Perspective::IS_CLIENT);
  char buffer[256];
  size_t length = 0;
  ASSERT_FALSE(decrypter.DecryptPacket(
      0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
}
// Input shorter than the 12-byte hash prefix must be rejected.
TEST_F(NullDecrypterTest, ShortInput) {
  unsigned char expected[] = {
      0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc,
  };
  const char* data = reinterpret_cast<const char*>(expected);
  size_t len = ABSL_ARRAYSIZE(expected);
  NullDecrypter decrypter(Perspective::IS_CLIENT);
  char buffer[256];
  size_t length = 0;
  ASSERT_FALSE(decrypter.DecryptPacket(
      0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
}
}
} | #include "quiche/quic/core/crypto/null_decrypter.h"
#include <cstdint>
#include <limits>
#include <string>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
NullDecrypter::NullDecrypter(Perspective perspective)
: perspective_(perspective) {} | TEST_F(NullDecrypterTest, DecryptClient) {
unsigned char expected[] = {
0x97,
0xdc,
0x27,
0x2f,
0x18,
0xa8,
0x56,
0x73,
0xdf,
0x8d,
0x1d,
0xd0,
'g',
'o',
'o',
'd',
'b',
'y',
'e',
'!',
};
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_SERVER);
char buffer[256];
size_t length = 0;
ASSERT_TRUE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
EXPECT_LT(0u, length);
EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
TEST_F(NullDecrypterTest, DecryptServer) {
unsigned char expected[] = {
0x63,
0x5e,
0x08,
0x03,
0x32,
0x80,
0x8f,
0x73,
0xdf,
0x8d,
0x1d,
0x1a,
'g',
'o',
'o',
'd',
'b',
'y',
'e',
'!',
};
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_TRUE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
EXPECT_LT(0u, length);
EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
TEST_F(NullDecrypterTest, BadHash) {
unsigned char expected[] = {
0x46,
0x11,
0xea,
0x5f,
0xcf,
0x1d,
0x66,
0x5b,
0xba,
0xf0,
0xbc,
0xfd,
'g',
'o',
'o',
'd',
'b',
'y',
'e',
'!',
};
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_FALSE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
}
TEST_F(NullDecrypterTest, ShortInput) {
unsigned char expected[] = {
0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc,
};
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_FALSE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
} |
#include "tensorflow/core/example/feature_util.h"
#include <string>
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace internal {
// Returns a mutable reference to the Feature stored under `name` in
// `example`, delegating to GetFeature.
Feature& ExampleFeature(absl::string_view name, Example* example) {
  return *GetFeature(name, example);
}
}
// Untyped existence check: true if any Feature is stored under `key`.
template <>
bool HasFeature<>(absl::string_view key, const Features& features) {
  return features.feature().contains(internal::ProtoMapKey(key));
}
// Typed existence checks: the entry must exist AND hold the matching
// kind of value list.
template <>
bool HasFeature<protobuf_int64>(absl::string_view key,
                                const Features& features) {
  auto it = features.feature().find(internal::ProtoMapKey(key));
  return (it != features.feature().end()) &&
         (it->second.kind_case() == Feature::KindCase::kInt64List);
}
template <>
bool HasFeature<float>(absl::string_view key, const Features& features) {
  auto it = features.feature().find(internal::ProtoMapKey(key));
  return (it != features.feature().end()) &&
         (it->second.kind_case() == Feature::KindCase::kFloatList);
}
// std::string and tstring both map onto the bytes list.
template <>
bool HasFeature<std::string>(absl::string_view key, const Features& features) {
  auto it = features.feature().find(internal::ProtoMapKey(key));
  return (it != features.feature().end()) &&
         (it->second.kind_case() == Feature::KindCase::kBytesList);
}
template <>
bool HasFeature<tstring>(absl::string_view key, const Features& features) {
  auto it = features.feature().find(internal::ProtoMapKey(key));
  return (it != features.feature().end()) &&
         (it->second.kind_case() == Feature::KindCase::kBytesList);
}
// Returns true if `sequence_example` stores a feature list under `key`.
bool HasFeatureList(absl::string_view key,
                    const SequenceExample& sequence_example) {
  const auto& feature_lists = sequence_example.feature_lists().feature_list();
  return feature_lists.contains(internal::ProtoMapKey(key));
}
// Typed accessors for the value list inside a Feature.  Const overloads
// return the stored repeated field; mutable overloads create the list of
// the requested kind if it is not already set.
template <>
const protobuf::RepeatedField<protobuf_int64>& GetFeatureValues<protobuf_int64>(
    const Feature& feature) {
  return feature.int64_list().value();
}
template <>
protobuf::RepeatedField<protobuf_int64>* GetFeatureValues<protobuf_int64>(
    Feature* feature) {
  return feature->mutable_int64_list()->mutable_value();
}
template <>
const protobuf::RepeatedField<float>& GetFeatureValues<float>(
    const Feature& feature) {
  return feature.float_list().value();
}
template <>
protobuf::RepeatedField<float>* GetFeatureValues<float>(Feature* feature) {
  return feature->mutable_float_list()->mutable_value();
}
// tstring and std::string share the same underlying bytes list.
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<tstring>(
    const Feature& feature) {
  return feature.bytes_list().value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<std::string>(
    const Feature& feature) {
  return feature.bytes_list().value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<tstring>(
    Feature* feature) {
  return feature->mutable_bytes_list()->mutable_value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<std::string>(
    Feature* feature) {
  return feature->mutable_bytes_list()->mutable_value();
}
// Read-only access to the Feature list stored under `key`; uses protobuf
// map `.at`, so the key must already be present.
const protobuf::RepeatedPtrField<Feature>& GetFeatureList(
    absl::string_view key, const SequenceExample& sequence_example) {
  return sequence_example.feature_lists()
      .feature_list()
      .at(internal::ProtoMapKey(key))
      .feature();
}
// Mutable access: operator[] inserts an empty FeatureList when absent.
protobuf::RepeatedPtrField<Feature>* GetFeatureList(
    absl::string_view feature_list_key, SequenceExample* sequence_example) {
  return (*sequence_example->mutable_feature_lists()
               ->mutable_feature_list())[internal::ProtoMapKey(
             feature_list_key)]
      .mutable_feature();
}
// Clears the value list of the requested kind (also sets that kind on the
// Feature via the mutable accessor).
template <>
void ClearFeatureValues<protobuf_int64>(Feature* feature) {
  feature->mutable_int64_list()->Clear();
}
template <>
void ClearFeatureValues<float>(Feature* feature) {
  feature->mutable_float_list()->Clear();
}
// std::string and tstring both clear the bytes list.
template <>
void ClearFeatureValues<std::string>(Feature* feature) {
  feature->mutable_bytes_list()->Clear();
}
template <>
void ClearFeatureValues<tstring>(Feature* feature) {
  feature->mutable_bytes_list()->Clear();
}
// Maps each proto type onto its Features container: Features is itself,
// Example uses `features`, SequenceExample uses its `context`.
template <>
Features* GetFeatures<Features>(Features* proto) {
  return proto;
}
template <>
Features* GetFeatures<Example>(Example* proto) {
  return proto->mutable_features();
}
template <>
Features* GetFeatures<SequenceExample>(SequenceExample* proto) {
  return proto->mutable_context();
}
template <>
const Features& GetFeatures<Features>(const Features& proto) {
  return proto;
}
template <>
const Features& GetFeatures<Example>(const Example& proto) {
  return proto.features();
}
template <>
const Features& GetFeatures<SequenceExample>(const SequenceExample& proto) {
  return proto.context();
}
} | #include "tensorflow/core/example/feature_util.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
// Tolerance for float comparisons throughout this file.
const float kTolerance = 1e-5;
// int64 accessors: read/write against Example, Feature and SequenceExample
// context, plus typed/untyped existence checks.
TEST(GetFeatureValuesInt64Test, ReadsASingleValue) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_int64_list()
      ->add_value(42);
  auto tag = GetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromFeature) {
  Feature feature;
  feature.mutable_int64_list()->add_value(42);
  auto values = GetFeatureValues<protobuf_int64>(feature);
  ASSERT_EQ(1, values.size());
  EXPECT_EQ(42, values.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromSequenceExampleContext) {
  SequenceExample example;
  (*example.mutable_context()->mutable_feature())["tag"]
      .mutable_int64_list()
      ->add_value(42);
  auto tag = GetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValue) {
  Example example;
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  ASSERT_EQ(1,
            example.features().feature().at("tag").int64_list().value_size());
  EXPECT_EQ(42, example.features().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToFeature) {
  Feature feature;
  GetFeatureValues<protobuf_int64>(&feature)->Add(42);
  ASSERT_EQ(1, feature.int64_list().value_size());
  EXPECT_EQ(42, feature.int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToSequenceExample) {
  SequenceExample example;
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  ASSERT_EQ(1, example.context().feature().at("tag").int64_list().value_size());
  EXPECT_EQ(42, example.context().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistence) {
  Example example;
  ASSERT_FALSE(HasFeature("tag", example));
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(0);
  EXPECT_TRUE(HasFeature("tag", example));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistenceForSequenceExample) {
  SequenceExample seq_example;
  ASSERT_FALSE(HasFeature("tag", seq_example));
  GetFeatureValues<protobuf_int64>("tag", &seq_example)->Add(0);
  EXPECT_TRUE(HasFeature("tag", seq_example));
}
// Writing an int64 list replaces the float list previously stored under
// the same key, so the typed check flips from false to true.
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistence) {
  Example example;
  GetFeatureValues<float>("tag", &example)->Add(3.14);
  ASSERT_FALSE(HasFeature<protobuf_int64>("tag", example));
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  EXPECT_TRUE(HasFeature<protobuf_int64>("tag", example));
  auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistenceForSequenceExample) {
  SequenceExample sequence_example;
  GetFeatureValues<float>("tag", &sequence_example)->Add(3.14);
  ASSERT_FALSE(HasFeature<protobuf_int64>("tag", sequence_example));
  GetFeatureValues<protobuf_int64>("tag", &sequence_example)->Add(42);
  EXPECT_TRUE(HasFeature<protobuf_int64>("tag", sequence_example));
  auto tag_ro = GetFeatureValues<protobuf_int64>("tag", sequence_example);
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_EQ(42, tag_ro.Get(0));
}
// The mutable accessor works as a std::copy destination.
TEST(GetFeatureValuesInt64Test, CopyIterableToAField) {
  Example example;
  std::vector<int> values{1, 2, 3};
  std::copy(values.begin(), values.end(),
            protobuf::RepeatedFieldBackInserter(
                GetFeatureValues<protobuf_int64>("tag", &example)));
  auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ(1, tag_ro.Get(0));
  EXPECT_EQ(2, tag_ro.Get(1));
  EXPECT_EQ(3, tag_ro.Get(2));
}
// float accessors: same read/write/existence coverage as the int64 tests.
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromFeature) {
  Feature feature;
  feature.mutable_float_list()->add_value(3.14);
  auto values = GetFeatureValues<float>(feature);
  ASSERT_EQ(1, values.size());
  EXPECT_NEAR(3.14, values.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValue) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_float_list()
      ->add_value(3.14);
  auto tag = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromSequenceExample) {
  SequenceExample example;
  (*example.mutable_context()->mutable_feature())["tag"]
      .mutable_float_list()
      ->add_value(3.14);
  auto tag = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToFeature) {
  Feature feature;
  GetFeatureValues<float>(&feature)->Add(3.14);
  ASSERT_EQ(1, feature.float_list().value_size());
  EXPECT_NEAR(3.14, feature.float_list().value(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValue) {
  Example example;
  GetFeatureValues<float>("tag", &example)->Add(3.14);
  ASSERT_EQ(1,
            example.features().feature().at("tag").float_list().value_size());
  EXPECT_NEAR(3.14,
              example.features().feature().at("tag").float_list().value(0),
              kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToSequenceExample) {
  SequenceExample example;
  GetFeatureValues<float>("tag", &example)->Add(3.14);
  ASSERT_EQ(1, example.context().feature().at("tag").float_list().value_size());
  EXPECT_NEAR(3.14, example.context().feature().at("tag").float_list().value(0),
              kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistence) {
  Example example;
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  ASSERT_FALSE(HasFeature<float>("tag", example));
  GetFeatureValues<float>("tag", &example)->Add(3.14);
  EXPECT_TRUE(HasFeature<float>("tag", example));
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
// ExampleHasFeature is the deprecated alias of HasFeature for Examples.
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistenceForDeprecatedMethod) {
  Example example;
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  ASSERT_FALSE(ExampleHasFeature<float>("tag", example));
  GetFeatureValues<float>("tag", &example)->Add(3.14);
  EXPECT_TRUE(ExampleHasFeature<float>("tag", example));
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
// string (bytes-list) accessors: same read/write/existence coverage.
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromFeature) {
  Feature feature;
  feature.mutable_bytes_list()->add_value("FOO");
  auto values = GetFeatureValues<std::string>(feature);
  ASSERT_EQ(1, values.size());
  EXPECT_EQ("FOO", values.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValue) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_bytes_list()
      ->add_value("FOO");
  auto tag = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromSequenceExample) {
  SequenceExample example;
  (*example.mutable_context()->mutable_feature())["tag"]
      .mutable_bytes_list()
      ->add_value("FOO");
  auto tag = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(1, tag.size());
  EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueToFeature) {
  Feature feature;
  *GetFeatureValues<std::string>(&feature)->Add() = "FOO";
  ASSERT_EQ(1, feature.bytes_list().value_size());
  EXPECT_EQ("FOO", feature.bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValue) {
  Example example;
  *GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
  ASSERT_EQ(1,
            example.features().feature().at("tag").bytes_list().value_size());
  EXPECT_EQ("FOO",
            example.features().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueSequenceExample) {
  SequenceExample example;
  *GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
  ASSERT_EQ(1, example.context().feature().at("tag").bytes_list().value_size());
  EXPECT_EQ("FOO", example.context().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, CheckTypedFieldExistence) {
  Example example;
  GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
  ASSERT_FALSE(HasFeature<std::string>("tag", example));
  *GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
  EXPECT_TRUE(HasFeature<std::string>("tag", example));
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_EQ("FOO", tag_ro.Get(0));
}
// Appending from a std::vector<double> stores the values as floats (the proto
// float_list type), readable back within kTolerance.
TEST(AppendFeatureValuesTest, FloatValuesFromContainer) {
  Example example;
  std::vector<double> values{1.1, 2.2, 3.3};
  AppendFeatureValues(values, "tag", &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// The key may be passed as an absl::string_view rather than a C string.
TEST(AppendFeatureValuesTest, FloatValuesFromContainerWithStringViewKey) {
  Example example;
  std::vector<double> values{1.1, 2.2, 3.3};
  absl::string_view key("tag");
  AppendFeatureValues(values, key, &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// Brace-initializer-list overload behaves like the container overload.
TEST(AppendFeatureValuesTest, FloatValuesUsingInitializerList) {
  Example example;
  AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// Initializer-list overload combined with a string_view key.
TEST(AppendFeatureValuesTest,
     FloatValuesUsingInitializerListWithStringViewKey) {
  Example example;
  absl::string_view key("tag");
  AppendFeatureValues({1.1, 2.2, 3.3}, key, &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// Iterator-pair overload appends the half-open range [begin, end).
TEST(AppendFeatureValuesTest, FloatValuesUsingIterators) {
  Example example;
  std::vector<double> values{1.1, 2.2, 3.3};
  AppendFeatureValues(values.begin(), values.end(), "tag", &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// Iterator-pair overload combined with a string_view key.
TEST(AppendFeatureValuesTest, FloatValuesUsingIteratorsWithStringViewKey) {
  Example example;
  absl::string_view key("tag");
  std::vector<double> values{1.1, 2.2, 3.3};
  AppendFeatureValues(values.begin(), values.end(), key, &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// SetFeatureValues replaces any previously appended values instead of
// appending to them.
TEST(SetFeatureValuesTest, FloatValuesUsingInitializerList) {
  Example example;
  AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
  SetFeatureValues({10.1, 20.2, 30.3}, "tag", &example);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(10.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(20.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(30.3, tag_ro.Get(2), kTolerance);
}
// A container of absl::string_view values is accepted and stored as strings.
TEST(SetFeatureValuesTest, ContainerOfStringView) {
  Example example;
  std::vector<std::string> values = {"hello", "world"};
  std::vector<absl::string_view> values_string_view(values.begin(),
                                                    values.end());
  SetFeatureValues(values_string_view, "tag", &example);
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(tag_ro.size(), 2);
  EXPECT_EQ(tag_ro.Get(0), "hello");
  EXPECT_EQ(tag_ro.Get(1), "world");
}
// Appending protobuf_int64 values from a container round-trips exactly.
TEST(AppendFeatureValuesTest, Int64ValuesUsingInitializerList) {
  Example example;
  std::vector<protobuf_int64> values{1, 2, 3};
  AppendFeatureValues(values, "tag", &example);
  auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ(1, tag_ro.Get(0));
  EXPECT_EQ(2, tag_ro.Get(1));
  EXPECT_EQ(3, tag_ro.Get(2));
}
// String literals in an initializer list are stored in order.
TEST(AppendFeatureValuesTest, StringValuesUsingInitializerList) {
  Example example;
  AppendFeatureValues({"FOO", "BAR", "BAZ"}, "tag", &example);
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ("FOO", tag_ro.Get(0));
  EXPECT_EQ("BAR", tag_ro.Get(1));
  EXPECT_EQ("BAZ", tag_ro.Get(2));
}
// Named std::string variables (not just literals) work in the initializer
// list as well.
TEST(AppendFeatureValuesTest, StringVariablesUsingInitializerList) {
  Example example;
  string string1("FOO");
  string string2("BAR");
  string string3("BAZ");
  AppendFeatureValues({string1, string2, string3}, "tag", &example);
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ("FOO", tag_ro.Get(0));
  EXPECT_EQ("BAR", tag_ro.Get(1));
  EXPECT_EQ("BAZ", tag_ro.Get(2));
}
// absl::string_view elements are copied into owned proto strings.
TEST(AppendFeatureValuesTest, StringViewVariablesUsingInitializerList) {
  Example example;
  AppendFeatureValues({absl::string_view("FOO"), absl::string_view("BAR"),
                       absl::string_view("BAZ")},
                      "tag", &example);
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ("FOO", tag_ro.Get(0));
  EXPECT_EQ("BAR", tag_ro.Get(1));
  EXPECT_EQ("BAZ", tag_ro.Get(2));
}
// Iterator-pair overload over a vector of string_views.
TEST(AppendFeatureValuesTest, StringViewVariablesUsingIterators) {
  Example example;
  std::vector<absl::string_view> strings;
  strings.push_back("FOO");
  strings.push_back("BAR");
  strings.push_back("BAZ");
  AppendFeatureValues(strings.begin(), strings.end(), "tag", &example);
  auto tag_ro = GetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_EQ("FOO", tag_ro.Get(0));
  EXPECT_EQ("BAR", tag_ro.Get(1));
  EXPECT_EQ("BAZ", tag_ro.Get(2));
}
// Values appended through a Feature* obtained from GetFeature are visible via
// the keyed GetFeatureValues accessor.
TEST(GetFeatureTest, WritesAVectorToFeature) {
  Example example;
  Feature* feature = GetFeature("tag", &example);
  AppendFeatureValues<float>({1.1, 2.2, 3.3}, feature);
  auto tag_ro = GetFeatureValues<float>("tag", example);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// The const GetFeature overload exposes previously written values through the
// Feature-based GetFeatureValues accessor.
TEST(GetFeatureTest, ReadsAVectorFromFeature) {
  Example example;
  AppendFeatureValues<float>({1.1, 2.2, 3.3}, "tag", &example);
  const Feature& feature = GetFeature("tag", example);
  auto tag_ro = GetFeatureValues<float>(feature);
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// A value written directly into the context proto map is readable through
// GetFeatureValues on the context Features message.
TEST(SequenceExampleTest, ReadsASingleValueFromContext) {
  SequenceExample se;
  (*se.mutable_context()->mutable_feature())["tag"]
      .mutable_int64_list()
      ->add_value(42);
  auto values = GetFeatureValues<protobuf_int64>("tag", se.context());
  ASSERT_EQ(1, values.size());
  EXPECT_EQ(42, values.Get(0));
}
// Writing through GetFeatureValues on the mutable context lands in the
// context's int64_list.
TEST(SequenceExampleTest, WritesASingleValueToContext) {
  SequenceExample se;
  GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
  ASSERT_EQ(1, se.context().feature().at("tag").int64_list().value_size());
  EXPECT_EQ(42, se.context().feature().at("tag").int64_list().value(0));
}
// AppendFeatureValues accepts a mutable context Features* target directly.
TEST(SequenceExampleTest, AppendFeatureValuesToContextSingleArg) {
  SequenceExample se;
  AppendFeatureValues({1.1, 2.2, 3.3}, "tag", se.mutable_context());
  auto tag_ro = GetFeatureValues<float>("tag", se.context());
  ASSERT_EQ(3, tag_ro.size());
  EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
  EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
  EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
// HasFeature<T> on a SequenceExample context is type-sensitive, mirroring the
// Example-based test above.
TEST(SequenceExampleTest, CheckTypedFieldExistence) {
  SequenceExample se;
  GetFeatureValues<float>("tag", se.mutable_context())->Add(3.14);
  ASSERT_FALSE(HasFeature<protobuf_int64>("tag", se.context()));
  GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
  EXPECT_TRUE(HasFeature<protobuf_int64>("tag", se.context()));
  auto tag_ro = GetFeatureValues<protobuf_int64>("tag", se.context());
  ASSERT_EQ(1, tag_ro.size());
  EXPECT_EQ(42, tag_ro.Get(0));
}
// GetFeatureList (const overload) returns a feature list created directly in
// the proto map.
TEST(SequenceExampleTest, ReturnsExistingFeatureLists) {
  SequenceExample se;
  (*se.mutable_feature_lists()->mutable_feature_list())["tag"]
      .mutable_feature()
      ->Add();
  auto feature = GetFeatureList("tag", se);
  ASSERT_EQ(1, feature.size());
}
// GetFeatureList (mutable overload) creates the feature list on first access.
TEST(SequenceExampleTest, CreatesNewFeatureLists) {
  SequenceExample se;
  GetFeatureList("tag", &se)->Add();
  EXPECT_EQ(1, se.feature_lists().feature_list().at("tag").feature_size());
}
// HasFeatureList reports presence only after the list has been created.
TEST(SequenceExampleTest, CheckFeatureListExistence) {
  SequenceExample se;
  ASSERT_FALSE(HasFeatureList("tag", se));
  GetFeatureList("tag", &se)->Add();
  ASSERT_TRUE(HasFeatureList("tag", se));
}
// Builds a SequenceExample via the helper API and verifies the resulting
// proto against a text-format golden, covering both context features and
// feature_lists.
TEST(SequenceExampleTest, AppendFeatureValuesWithInitializerList) {
  SequenceExample se;
  AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
  AppendFeatureValues({"cam1-0", "cam2-0"},
                      GetFeatureList("images", &se)->Add());
  AppendFeatureValues({"cam1-1", "cam2-2"},
                      GetFeatureList("images", &se)->Add());
  SequenceExample expected_proto;
  protobuf::TextFormat::ParseFromString(
      "context {\n"
      "  feature {\n"
      "    key: \"ids\"\n"
      "    value {\n"
      "      int64_list {\n"
      "        value: 1\n"
      "        value: 2\n"
      "        value: 3\n"
      "      }\n"
      "    }\n"
      "  }\n"
      "}\n"
      "feature_lists {\n"
      "  feature_list {\n"
      "    key: \"images\"\n"
      "    value {\n"
      "      feature {\n"
      "        bytes_list {\n"
      "          value: \"cam1-0\"\n"
      "          value: \"cam2-0\"\n"
      "        }\n"
      "      }\n"
      "      feature {\n"
      "        bytes_list {\n"
      "          value: \"cam1-1\"\n"
      "          value: \"cam2-2\"\n"
      "        }\n"
      "      }\n"
      "    }\n"
      "  }\n"
      "}\n",
      &expected_proto);
  EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
// Appending a vector<float> to a feature-list entry produces a float_list,
// verified against a text-format golden.
TEST(SequenceExampleTest, AppendFeatureValuesWithVectors) {
  SequenceExample se;
  std::vector<float> readings{1.0, 2.5, 5.0};
  AppendFeatureValues(readings, GetFeatureList("movie_ratings", &se)->Add());
  SequenceExample expected_proto;
  protobuf::TextFormat::ParseFromString(
      "feature_lists {\n"
      "  feature_list {\n"
      "    key: \"movie_ratings\"\n"
      "    value {\n"
      "      feature {\n"
      "        float_list {\n"
      "          value: 1\n"
      "          value: 2.5\n"
      "          value: 5\n"
      "        }\n"
      "      }\n"
      "    }\n"
      "  }\n"
      "}\n",
      &expected_proto);
  EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
// SetFeatureValues overwrites prior context values; a subsequent Append adds
// to the replaced values (final list is {1,2,3,4,5,6}).
TEST(SequenceExampleTest, SetContextFeatureValuesWithInitializerList) {
  SequenceExample se;
  SetFeatureValues({101, 102, 103}, "ids", se.mutable_context());
  SetFeatureValues({1, 2, 3}, "ids", se.mutable_context());
  AppendFeatureValues({4, 5, 6}, "ids", se.mutable_context());
  SequenceExample expected_proto;
  protobuf::TextFormat::ParseFromString(
      "context {\n"
      "  feature {\n"
      "    key: \"ids\"\n"
      "    value {\n"
      "      int64_list {\n"
      "        value: 1\n"
      "        value: 2\n"
      "        value: 3\n"
      "        value: 4\n"
      "        value: 5\n"
      "        value: 6\n"
      "      }\n"
      "    }\n"
      "  }\n"
      "}\n",
      &expected_proto);
  EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
// MaybeGetFeatureValues returns nullptr when the key is absent.
TEST(MaybeGetFeatureValuesTest, ReturnsNullPtr) {
  const Example example;
  auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(tag, nullptr);
}
// Reads back a single int64 value written directly into the proto map.
TEST(MaybeGetFeatureValuesTest, ReadsASingleInt) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_int64_list()
      ->add_value(42);
  auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
  ASSERT_EQ(1, tag->size());
  EXPECT_EQ(42, tag->Get(0));
}
// Reads back a single float value.
TEST(MaybeGetFeatureValuesTest, ReadsASingleFloat) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_float_list()
      ->add_value(0.3);
  auto tag = MaybeGetFeatureValues<float>("tag", example);
  ASSERT_EQ(1, tag->size());
  EXPECT_FLOAT_EQ(0.3, tag->Get(0));
}
// Reads back a single string value.
TEST(MaybeGetFeatureValuesTest, ReadsASingleString) {
  Example example;
  (*example.mutable_features()->mutable_feature())["tag"]
      .mutable_bytes_list()
      ->add_value("entry");
  auto tag = MaybeGetFeatureValues<std::string>("tag", example);
  ASSERT_EQ(1, tag->size());
  EXPECT_EQ("entry", tag->Get(0));
}
}
} | bool HasFeatureList(absl::string_view key,
const SequenceExample& sequence_example) {
return sequence_example.feature_lists().feature_list().contains(
internal::ProtoMapKey(key));
} | TEST(SequenceExampleTest, CheckFeatureListExistence) {
SequenceExample se;
ASSERT_FALSE(HasFeatureList("tag", se));
GetFeatureList("tag", &se)->Add();
ASSERT_TRUE(HasFeatureList("tag", se));
} |
#ifndef TENSORSTORE_INTERNAL_ARENA_H_
#define TENSORSTORE_INTERNAL_ARENA_H_
#include <stddef.h>
#include <memory>
#include <new>
#include <utility>
#include "tensorstore/internal/exception_macros.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
// A simple bump allocator backed by an optional caller-supplied buffer.
// Allocations are first carved out of `initial_buffer_`; once it cannot
// satisfy a request (size or alignment), the arena falls back to aligned
// heap allocation. Buffer-backed allocations are never individually
// reclaimed; heap-backed ones are freed in `deallocate`.
class Arena {
 public:
  // Arena with no initial buffer: every allocation goes to the heap.
  Arena() : remaining_bytes_(0) {}
  // Arena that serves allocations from `initial_buffer` while space remains.
  explicit Arena(span<unsigned char> initial_buffer)
      : initial_buffer_(initial_buffer),
        remaining_bytes_(initial_buffer.size()) {}
  // Allocates storage for `n` objects of type `T` with the given alignment.
  // Throws/aborts via TENSORSTORE_THROW_BAD_ALLOC on size overflow.
  template <typename T = unsigned char>
  T* allocate(size_t n, size_t alignment = alignof(T)) {
    size_t num_bytes;
    // Guard n * sizeof(T) against overflow before using it as a byte count.
    if (MulOverflow(n, sizeof(T), &num_bytes)) {
      TENSORSTORE_THROW_BAD_ALLOC;
    }
    // Bump pointer: the unused region is the tail of the initial buffer.
    void* ptr = static_cast<void*>(initial_buffer_.end() - remaining_bytes_);
    // std::align adjusts `ptr`/`remaining_bytes_` if the aligned request
    // fits in the buffer; otherwise it returns null and we fall back to the
    // aligned heap allocator.
    if (std::align(alignment, num_bytes, ptr, remaining_bytes_)) {
      remaining_bytes_ -= num_bytes;
    } else {
      ptr = ::operator new(num_bytes, std::align_val_t(alignment));
    }
    return static_cast<T*>(ptr);
  }
  // Releases storage previously obtained from `allocate`. Pointers inside
  // the initial buffer are a no-op (the buffer is reclaimed as a whole);
  // heap pointers are returned via sized, aligned operator delete.
  template <typename T>
  void deallocate(T* p, size_t n, size_t alignment = alignof(T)) {
    if (static_cast<void*>(p) >= static_cast<void*>(initial_buffer_.data()) &&
        static_cast<void*>(p + n) <=
            static_cast<void*>(initial_buffer_.data() +
                               initial_buffer_.size())) {
      return;
    }
    ::operator delete(static_cast<void*>(p), n * sizeof(T),
                      std::align_val_t(alignment));
  }
 private:
  // Caller-owned backing storage; may be empty.
  span<unsigned char> initial_buffer_;
  // Bytes still available at the tail of `initial_buffer_`.
  size_t remaining_bytes_;
};
// Standard-library-compatible allocator adapter that forwards all
// allocation/deallocation to a non-owned Arena. Copyable across value types
// via the rebind/converting constructor; two allocators compare equal iff
// they reference the same Arena.
template <typename T = unsigned char>
class ArenaAllocator {
 public:
  using value_type = T;
  using pointer = T*;
  using void_pointer = void*;
  using const_void_pointer = const void*;
  using reference = T&;
  using const_pointer = const T*;
  using const_reference = const T&;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  template <typename U>
  struct rebind {
    using other = ArenaAllocator<U>;
  };
  // Implicit on purpose: lets containers be constructed directly from an
  // `Arena*` (e.g. `std::vector<int, ArenaAllocator<int>> v(n, &arena);`).
  ArenaAllocator(Arena* arena) : arena_(arena) {}
  // Converting constructor between allocators of different value types,
  // sharing the same underlying arena.
  template <typename U>
  ArenaAllocator(ArenaAllocator<U> other) : arena_(other.arena()) {}
  T* allocate(size_t n) const { return arena_->allocate<T>(n); }
  void deallocate(T* p, size_t n) const { arena_->deallocate(p, n); }
  template <typename... Arg>
  void construct(T* p, Arg&&... arg) {
    new (p) T(std::forward<Arg>(arg)...);
  }
  void destroy(T* p) { p->~T(); }
  Arena* arena() const { return arena_; }
  friend bool operator==(ArenaAllocator a, ArenaAllocator b) {
    return a.arena_ == b.arena_;
  }
  friend bool operator!=(ArenaAllocator a, ArenaAllocator b) {
    return a.arena_ != b.arena_;
  }
  // Non-owned; must outlive the allocator and all containers using it.
  Arena* arena_;
};
}
}
#endif | #include "tensorstore/internal/arena.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::ArenaAllocator;
// Returns true if `ptr` points inside `buffer` — used to distinguish
// buffer-backed allocations from heap fallbacks.
bool Contains(tensorstore::span<const unsigned char> buffer, void* ptr) {
  return ptr >= buffer.data() && ptr < buffer.data() + buffer.size();
}
// A small vector fits entirely inside the arena's initial buffer.
TEST(ArenaTest, Small) {
  unsigned char buffer[1024];
  Arena arena(buffer);
  std::vector<int, ArenaAllocator<int>> vec(100, &arena);
  EXPECT_EQ(&arena, vec.get_allocator().arena());
  std::fill(vec.begin(), vec.end(), 5);
  EXPECT_TRUE(Contains(buffer, vec.data()));
}
// In-buffer allocations honor the requested alignment; oversized requests
// fall back to the heap but remain aligned.
TEST(ArenaTest, Alignment) {
  alignas(16) unsigned char buffer[1024];
  for (int x = 1; x <= 16; x *= 2) {
    Arena arena(buffer);
    unsigned char* ptr1 = arena.allocate(1, 1);
    EXPECT_EQ(&buffer[0], ptr1);
    unsigned char* ptr2 = arena.allocate(1, x);
    EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr2) % x);
    EXPECT_EQ(&buffer[x], ptr2);
    arena.deallocate(ptr1, 1, 1);
    arena.deallocate(ptr2, 1, x);
  }
  {
    Arena arena(buffer);
    unsigned char* ptr = arena.allocate(2000, 16);
    EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr) % 16);
    arena.deallocate(ptr, 2000, 16);
  }
}
// An allocation larger than the buffer spills to the heap.
TEST(ArenaTest, Large) {
  unsigned char buffer[1024];
  Arena arena(buffer);
  std::vector<int, ArenaAllocator<int>> vec(&arena);
  vec.resize(2000);
  std::fill(vec.begin(), vec.end(), 7);
  EXPECT_FALSE(Contains(buffer, vec.data()));
}
// Sequential allocations share the buffer until it is exhausted (vec3 spills
// to the heap); a later small allocation can still use remaining buffer
// space, and earlier contents are not clobbered.
TEST(ArenaTest, MultipleSmall) {
  unsigned char buffer[1024];
  Arena arena(buffer);
  std::vector<std::int32_t, ArenaAllocator<int>> vec(100, &arena);
  EXPECT_EQ(&arena, vec.get_allocator().arena());
  std::fill(vec.begin(), vec.end(), 5);
  EXPECT_TRUE(Contains(buffer, vec.data()));
  std::vector<std::int32_t, ArenaAllocator<int>> vec2(100, &arena);
  std::fill(vec2.begin(), vec2.end(), 6);
  EXPECT_TRUE(Contains(buffer, vec2.data()));
  std::vector<std::int32_t, ArenaAllocator<int>> vec3(100, &arena);
  std::fill(vec3.begin(), vec3.end(), 7);
  EXPECT_FALSE(Contains(buffer, vec3.data()));
  std::vector<std::int32_t, ArenaAllocator<int>> vec4(5, &arena);
  std::fill(vec4.begin(), vec4.end(), 8);
  EXPECT_TRUE(Contains(buffer, vec4.data()));
  EXPECT_THAT(vec,
              ::testing::ElementsAreArray(std::vector<std::int32_t>(100, 5)));
  EXPECT_THAT(vec2,
              ::testing::ElementsAreArray(std::vector<std::int32_t>(100, 6)));
  EXPECT_THAT(vec3,
              ::testing::ElementsAreArray(std::vector<std::int32_t>(100, 7)));
  EXPECT_THAT(vec4,
              ::testing::ElementsAreArray(std::vector<std::int32_t>(5, 8)));
}
} | T* allocate(size_t n) const { return arena_->allocate<T>(n); } | TEST(ArenaTest, Small) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<int, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
}
TEST(ArenaTest, MultipleSmall) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<std::int32_t, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec2(100, &arena);
std::fill(vec2.begin(), vec2.end(), 6);
EXPECT_TRUE(Contains(buffer, vec2.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec3(100, &arena);
std::fill(vec3.begin(), vec3.end(), 7);
EXPECT_FALSE(Contains(buffer, vec3.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec4(5, &arena);
std::fill(vec4.begin(), vec4.end(), 8);
EXPECT_TRUE(Contains(buffer, vec4.data()));
EXPECT_THAT(vec,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 5)));
EXPECT_THAT(vec2,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 6)));
EXPECT_THAT(vec3,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 7)));
EXPECT_THAT(vec4,
::testing::ElementsAreArray(std::vector<std::int32_t>(5, 8)));
} |
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
// Seeds the evaluation caches for `fusion`: the fused expression root is
// indexed by the fusion itself with the given usage count, then the caches
// for all other fused instructions are derived via RecomputeCache().
FusionNodeIndexingEvaluation::FusionNodeIndexingEvaluation(
    const HloInstruction* fusion, int64_t root_usage_count)
    : fusion_(fusion) {
  HloInstruction* root = fusion->fused_expression_root();
  indexing_users_[root].insert(fusion);
  index_usage_count_[fusion] = root_usage_count;
  RecomputeCache();
}
// Threshold above which fusing a producer is considered to duplicate too
// much code.
const int64_t FusionNodeIndexingEvaluation::kAllowedCodeDuplication = 15;
namespace {
// Counts effective users of `hlo`. A fusion user is expanded to the user
// count of the corresponding fused parameter, so usage inside fusions is
// accounted for.
int64_t UserCount(const HloInstruction* hlo) {
  int64_t cnt = 0;
  for (HloInstruction* user : hlo->users()) {
    if (user->opcode() == HloOpcode::kFusion) {
      int64_t operand_index = user->operand_index(hlo);
      cnt += user->fused_parameter(operand_index)->user_count();
    } else {
      ++cnt;
    }
  }
  return cnt;
}
}
// Returns true if fusing `producer` would emit it too many times, or would
// re-emit a cache-invalidating op that is used/emitted more than once.
// Broadcasts are exempt.
bool FusionNodeIndexingEvaluation::CodeDuplicationTooHigh(
    const HloInstruction* producer) const {
  if (producer->opcode() == HloOpcode::kBroadcast) {
    return false;
  }
  int64_t emitted_instructions = EvaluateEmittedInstructions(producer);
  return emitted_instructions > kAllowedCodeDuplication ||
         (ElementalIrEmitter::OpInvalidatesCache(producer) &&
          (emitted_instructions > 1 || UserCount(producer) > 1));
}
// Returns true if any already-cached instruction exceeds the duplication
// threshold, applying the same cache-invalidation rule as
// CodeDuplicationTooHigh.
bool FusionNodeIndexingEvaluation::MaxCodeDuplicationTooHigh() const {
  for (const auto& entry : index_usage_count_) {
    if (entry.second > kAllowedCodeDuplication ||
        (ElementalIrEmitter::OpInvalidatesCache(entry.first) &&
         (entry.second > 1 || UserCount(entry.first) > 1))) {
      return true;
    }
  }
  return false;
}
// Returns how many times `producer` would be emitted if fused: the sum of
// the index-usage counts of every instruction that indexes into it.
// `producer` must already be present in `indexing_users_`.
int64_t FusionNodeIndexingEvaluation::EvaluateEmittedInstructions(
    const HloInstruction* producer) const {
  int64_t emitted_count = 0;
  const auto& producer_indexing_users = indexing_users_.at(producer);
  for (const HloInstruction* indexing_user : producer_indexing_users) {
    emitted_count += index_usage_count_.at(indexing_user);
  }
  return emitted_count;
}
// Installs `producer` (newly fused) into the caches with the given set of
// indexing users, then derives its usage count and propagates indexing users
// to its operands. `producer` must not already be cached.
void FusionNodeIndexingEvaluation::UpdateEvaluationCache(
    const HloInstruction* producer,
    absl::flat_hash_set<const HloInstruction*> indexing_users_of_producer) {
  CHECK(!indexing_users_.contains(producer));
  indexing_users_[producer] = std::move(indexing_users_of_producer);
  UpdateIndexUsageCount(producer);
  UpdateIndexingUsersOfOperands(producer);
}
// Removes `fusion_operand` from the indexing-users cache and returns its
// former indexing-user set (to be re-installed via UpdateEvaluationCache
// once the operand has been fused). The operand must not have a usage count
// entry of its own.
absl::flat_hash_set<const HloInstruction*>
FusionNodeIndexingEvaluation::RemoveFusionOperand(
    HloInstruction* fusion_operand) {
  auto indexing_users_of_operand =
      std::move(indexing_users_.at(fusion_operand));
  indexing_users_.erase(fusion_operand);
  CHECK(!index_usage_count_.contains(fusion_operand));
  return indexing_users_of_operand;
}
void FusionNodeIndexingEvaluation::RecomputeCache() {
auto postorder =
fusion_->fused_instructions_computation()->MakeInstructionPostOrder();
std::reverse(postorder.begin(), postorder.end());
for (const auto* instruction : postorder) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
UpdateIndexUsageCount(instruction);
UpdateIndexingUsersOfOperands(instruction);
}
}
// Sets `instruction`'s usage count to the sum of its indexing users' counts.
// The CHECK enforces that each instruction's count is computed exactly once.
void FusionNodeIndexingEvaluation::UpdateIndexUsageCount(
    const HloInstruction* instruction) {
  int64_t total = 0;
  for (const auto* user : indexing_users_[instruction]) {
    total += index_usage_count_.at(user);
  }
  CHECK(index_usage_count_.emplace(instruction, total).second);
}
// Propagates indexing-user information from `instruction` to its operands.
// Fused parameters are mapped back to the corresponding fusion operand. When
// the operand's index can be derived from the instruction's (transpose, or
// same shape ignoring element type), the instruction's own indexing users
// carry over; otherwise the instruction itself becomes the indexing user.
void FusionNodeIndexingEvaluation::UpdateIndexingUsersOfOperands(
    const HloInstruction* instruction) {
  for (const auto* operand : instruction->operands()) {
    if (operand->opcode() == HloOpcode::kParameter) {
      // Look through the parameter to the actual fusion operand.
      operand = fusion_->operand(operand->parameter_number());
    }
    if (instruction->opcode() == HloOpcode::kTranspose ||
        Shape::Equal().IgnoreElementType()(operand->shape(),
                                           instruction->shape())) {
      indexing_users_[operand].insert(indexing_users_[instruction].begin(),
                                      indexing_users_[instruction].end());
    } else {
      indexing_users_[operand].insert(instruction);
    }
  }
}
} | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
using FusionNodeIndexingEvaluationTest = HloTestBase;
// Test-only InstructionFusion subclass that maintains a
// FusionNodeIndexingEvaluation per fusion node and exposes hooks to inspect
// emitted-instruction estimates.
class InstructionFusionForTesting : public InstructionFusion {
 public:
  explicit InstructionFusionForTesting()
      : InstructionFusion(InstructionFusion::IsExpensive) {}
  // Wraps the base FuseInstruction to keep the evaluation cache in sync:
  // the producer's indexing users are removed before the fuse and
  // re-installed for the new (fused) producer afterwards.
  HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
                                  HloInstruction* producer) override {
    auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
    if (evaluation == fusion_node_evaluations_.end()) {
      evaluation =
          fusion_node_evaluations_
              .emplace(fusion_instruction,
                       FusionNodeIndexingEvaluation(fusion_instruction))
              .first;
    }
    auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
    HloInstruction* new_producer =
        InstructionFusion::FuseInstruction(fusion_instruction, producer);
    evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
    return new_producer;
  }
  // Public passthrough so tests can drive fusion directly.
  HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
                       HloComputation* computation) override {
    return InstructionFusion::Fuse(producer, consumer, computation);
  }
  // Returns the estimated emit count for `producer` into `consumer`
  // (0 for non-fusion consumers), creating the evaluation lazily.
  int64_t EvaluateEmittedInstructions(const HloInstruction* producer,
                                      const HloInstruction* consumer) {
    if (consumer->opcode() != HloOpcode::kFusion) {
      return 0;
    }
    if (fusion_node_evaluations_.find(consumer) ==
        fusion_node_evaluations_.end()) {
      fusion_node_evaluations_.emplace(consumer,
                                       FusionNodeIndexingEvaluation(consumer));
    }
    return fusion_node_evaluations_.at(consumer).EvaluateEmittedInstructions(
        producer);
  }
  // Returns the cached evaluation for `consumer`, or nullptr if none exists.
  const FusionNodeIndexingEvaluation* GetFusionNodeEvaluation(
      const HloInstruction* consumer) {
    auto it = fusion_node_evaluations_.find(consumer);
    if (it == fusion_node_evaluations_.end()) {
      return nullptr;
    }
    return &it->second;
  }
 private:
  absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
      fusion_node_evaluations_;
};
// Smoke test: fusing a single producer into its consumer succeeds.
TEST_F(FusionNodeIndexingEvaluationTest, FuseTwoInstructions) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY entry_computation {
    p0 = f32[4,3]{1,0} parameter(0)
    add = f32[4,3]{1,0} add(p0, p0)
    ROOT sub = f32[4,3]{1,0} subtract(add, p0)
  })")
                    .value();
  HloInstruction* sub = module->entry_computation()->root_instruction();
  HloInstruction* add = sub->mutable_operand(0);
  InstructionFusionForTesting().Fuse(add, sub, module->entry_computation());
}
// After fusing one slice, the second slice is estimated to be emitted once.
TEST_F(FusionNodeIndexingEvaluationTest, FuseThreeInstructions) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY entry_computation {
    p0 = f32[4]{0} parameter(0)
    slice1 = f32[3]{0} slice(p0), slice={[0:3]}
    slice2 = f32[3]{0} slice(p0), slice={[0:3]}
    ROOT sub = f32[3]{0} subtract(slice1, slice2)
  })")
                    .value();
  HloInstruction* sub = module->entry_computation()->root_instruction();
  InstructionFusionForTesting instruction_fusion;
  HloInstruction* slice1 = sub->mutable_operand(0);
  HloInstruction* slice2 = sub->mutable_operand(1);
  auto fusion =
      instruction_fusion.Fuse(slice1, sub, module->entry_computation());
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2, fusion), 1);
  instruction_fusion.Fuse(slice2, fusion, module->entry_computation());
}
// A slice/add ladder doubles the emit count at each level (1, 2, 2, 2, 4),
// demonstrating the exponential duplication pattern the evaluation tracks.
TEST_F(FusionNodeIndexingEvaluationTest, ExponentialDuplicationPattern) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY entry_computation {
    p0 = f32[4]{0} parameter(0)
    p1 = f32[4]{0} parameter(1)
    add0 = f32[4]{0} add(p0, p1)
    slice1.0 = f32[3]{0} slice(add0), slice={[0:3]}
    slice1.1 = f32[3]{0} slice(add0), slice={[1:4]}
    add1 = f32[3]{0} add(slice1.0, slice1.1)
    slice2.0 = f32[2]{0} slice(add1), slice={[0:2]}
    slice2.1 = f32[2]{0} slice(add1), slice={[1:3]}
    ROOT add2 = f32[2]{0} add(slice2.0, slice2.1)
  })")
                    .value();
  HloInstruction* add2 = module->entry_computation()->root_instruction();
  InstructionFusionForTesting instruction_fusion;
  HloInstruction* slice2_0 = add2->mutable_operand(0);
  HloInstruction* slice2_1 = add2->mutable_operand(1);
  auto fusion =
      instruction_fusion.Fuse(slice2_0, add2, module->entry_computation());
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2_1, fusion),
            1);
  instruction_fusion.Fuse(slice2_1, fusion, module->entry_computation());
  HloInstruction* add1 = fusion->mutable_operand(0);
  EXPECT_EQ(add1->opcode(), HloOpcode::kAdd);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add1, fusion), 2);
  instruction_fusion.Fuse(add1, fusion, module->entry_computation());
  HloInstruction* slice1_0 = fusion->mutable_operand(0);
  EXPECT_EQ(slice1_0->opcode(), HloOpcode::kSlice);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_0, fusion),
            2);
  instruction_fusion.Fuse(slice1_0, fusion, module->entry_computation());
  HloInstruction* slice1_1 = fusion->mutable_operand(0);
  EXPECT_EQ(slice1_1->opcode(), HloOpcode::kSlice);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_1, fusion),
            2);
  instruction_fusion.Fuse(slice1_1, fusion, module->entry_computation());
  HloInstruction* add0 = fusion->mutable_operand(0);
  EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
  instruction_fusion.Fuse(add0, fusion, module->entry_computation());
}
// An evaluation built for a pre-existing fusion (caches populated via
// RecomputeCache) yields the same emit estimate (4) as incrementally fusing
// the equivalent graph.
TEST_F(FusionNodeIndexingEvaluationTest, RecomputeCache) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  %fused_computation (param_0.5: f32[4]) -> f32[2] {
    %param_0.5 = f32[4]{0} parameter(0)
    %slice1.2 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[0:3]}
    %slice1.3 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[1:4]}
    %add1.1 = f32[3]{0} add(f32[3]{0} %slice1.2, f32[3]{0} %slice1.3)
    %slice2.2 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[0:2]}
    %slice2.3 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[1:3]}
    ROOT %add2.1 = f32[2]{0} add(f32[2]{0} %slice2.2, f32[2]{0} %slice2.3)
  }
  ENTRY entry_computation {
    p0 = f32[4]{0} parameter(0)
    p1 = f32[4]{0} parameter(1)
    add0 = f32[4]{0} add(p0, p1)
    ROOT %fusion = f32[2]{0} fusion(add0), kind=kLoop, calls=%fused_computation
  })")
                    .value();
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  InstructionFusionForTesting instruction_fusion;
  HloInstruction* add0 = fusion->mutable_operand(0);
  EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
}
// A deeper slice/add ladder reaches 16 emitted instructions: a broadcast
// producer is exempt from the duplication check, while fusing the add behind
// it would exceed kAllowedCodeDuplication (15).
TEST_F(FusionNodeIndexingEvaluationTest, CodeDuplicationTooHigh) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  %fused_computation (param: f32[6]) -> f32[2] {
    %param = f32[6]{0} parameter(0)
    %slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
    %slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
    %add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
    %slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
    %slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
    %add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
    %slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
    %slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
    %add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
    %slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
    %slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
    ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
  }
  ENTRY entry_computation {
    p0 = f32[] parameter(0)
    add = f32[] add(p0, p0)
    broadcast = f32[6]{0} broadcast(add), dimensions={}
    ROOT %fusion = f32[2]{0} fusion(broadcast), kind=kLoop, calls=%fused_computation
  })")
                    .value();
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  InstructionFusionForTesting instruction_fusion;
  HloInstruction* broadcast = fusion->mutable_operand(0);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(broadcast, fusion),
            16);
  EXPECT_FALSE(instruction_fusion.GetFusionNodeEvaluation(fusion)
                   ->CodeDuplicationTooHigh(broadcast));
  instruction_fusion.Fuse(broadcast, fusion, module->entry_computation());
  HloInstruction* add = fusion->mutable_operand(0);
  EXPECT_EQ(add->opcode(), HloOpcode::kAdd);
  EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add, fusion), 16);
  EXPECT_TRUE(instruction_fusion.GetFusionNodeEvaluation(fusion)
                  ->CodeDuplicationTooHigh(add));
}
} | void FusionNodeIndexingEvaluation::RecomputeCache() {
auto postorder =
fusion_->fused_instructions_computation()->MakeInstructionPostOrder();
std::reverse(postorder.begin(), postorder.end());
for (const auto* instruction : postorder) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
UpdateIndexUsageCount(instruction);
UpdateIndexingUsersOfOperands(instruction);
}
} | TEST_F(FusionNodeIndexingEvaluationTest, RecomputeCache) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param_0.5: f32[4]) -> f32[2] {
%param_0.5 = f32[4]{0} parameter(0)
%slice1.2 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[0:3]}
%slice1.3 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[1:4]}
%add1.1 = f32[3]{0} add(f32[3]{0} %slice1.2, f32[3]{0} %slice1.3)
%slice2.2 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[0:2]}
%slice2.3 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[1:3]}
ROOT %add2.1 = f32[2]{0} add(f32[2]{0} %slice2.2, f32[2]{0} %slice2.3)
}
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
ROOT %fusion = f32[2]{0} fusion(add0), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
} |
#ifndef ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_
#define ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <vector>
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// A seed sequence that hands back exactly the 32-bit words it was given,
// cycling through them when more output is requested than was supplied.
// Unlike std::seed_seq, no mixing or conditioning is applied; with no seed
// material at all, generate() produces zeros.
class ExplicitSeedSeq {
 public:
  using result_type = uint32_t;

  ExplicitSeedSeq() : state_() {}

  ExplicitSeedSeq(const ExplicitSeedSeq& other) = default;
  ExplicitSeedSeq& operator=(const ExplicitSeedSeq& other) = default;
  ExplicitSeedSeq(ExplicitSeedSeq&& other) = default;
  ExplicitSeedSeq& operator=(ExplicitSeedSeq&& other) = default;

  // Copies [begin, end), truncating each element to its low 32 bits.
  template <typename Iterator>
  ExplicitSeedSeq(Iterator begin, Iterator end) {
    while (begin != end) {
      state_.push_back(static_cast<uint32_t>(*begin & 0xffffffff));
      ++begin;
    }
  }

  template <typename T>
  ExplicitSeedSeq(std::initializer_list<T> il)
      : ExplicitSeedSeq(il.begin(), il.end()) {}

  // Number of 32-bit words of stored seed material.
  size_t size() const { return state_.size(); }

  // Copies the stored seed material, verbatim, to `out`.
  template <typename OutIterator>
  void param(OutIterator out) const {
    std::copy(state_.begin(), state_.end(), out);
  }

  // Fills [begin, end) with the seed material, repeating it cyclically;
  // writes zeros when no material was provided.
  template <typename OutIterator>
  void generate(OutIterator begin, OutIterator end) {
    size_t index = 0;
    while (begin != end) {
      *begin = state_.empty() ? 0 : state_[index];
      if (++index >= state_.size()) {
        index = 0;
      }
      ++begin;
    }
  }

 protected:
  // Raw seed words, in the order they were supplied.
  std::vector<uint32_t> state_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/explicit_seed_seq.h"
#include <iterator>
#include <random>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/seed_sequences.h"
namespace {
using ::absl::random_internal::ExplicitSeedSeq;
// Exercises the seed-sequence interface surface of Sseq: default
// construction, iterator-range construction, initializer-list construction,
// size()/param() round-tripping, and generate(). Always returns true;
// failures surface through the EXPECT macros.
template <typename Sseq>
bool ConformsToInterface() {
  // Default-constructible.
  { Sseq default_constructed_seq; }
  // Constructible from an iterator range.
  {
    uint32_t init_array[] = {1, 3, 5, 7, 9};
    Sseq iterator_constructed_seq(init_array, &init_array[5]);
  }
  // Constructible from an initializer list.
  { Sseq list_constructed_seq = {1, 3, 5, 7, 9, 11, 13}; }
  // size() reports the amount of seed material; param() copies it out.
  {
    uint32_t init_array[] = {1, 2, 3, 4, 5};
    Sseq seq(init_array, &init_array[ABSL_ARRAYSIZE(init_array)]);
    EXPECT_EQ(seq.size(), ABSL_ARRAYSIZE(init_array));
    uint32_t state_array[ABSL_ARRAYSIZE(init_array)];
    seq.param(state_array);
    for (int i = 0; i < ABSL_ARRAYSIZE(state_array); i++) {
      // NOTE: only checks std::seed_seq-preserving sequences like these;
      // param() must reproduce the original material {1..5}.
      EXPECT_EQ(state_array[i], i + 1);
    }
  }
  // generate() must be callable even on an empty sequence.
  {
    Sseq seq;
    uint32_t seeds[5];
    seq.generate(seeds, &seeds[ABSL_ARRAYSIZE(seeds)]);
  }
  return true;
}
}
// Both the standard seed_seq and ExplicitSeedSeq must satisfy the
// interface exercised by ConformsToInterface().
TEST(SeedSequences, CheckInterfaces) {
  EXPECT_TRUE(ConformsToInterface<std::seed_seq>());
  EXPECT_TRUE(ConformsToInterface<ExplicitSeedSeq>());
}
// With no seed material, generate() must fill the output with zeros.
TEST(ExplicitSeedSeq, DefaultConstructorGeneratesZeros) {
  const size_t kNumBlocks = 128;
  uint32_t outputs[kNumBlocks];
  ExplicitSeedSeq seq;
  seq.generate(outputs, &outputs[kNumBlocks]);
  for (uint32_t& seed : outputs) {
    EXPECT_EQ(seed, 0);
  }
}
// Verifies that ExplicitSeedSeq::generate() forwards the seed material
// verbatim: a shorter request returns a prefix, an exact-size request
// returns everything, and a longer request wraps around cyclically.
// Fix: the test suite name was misspelled "ExplicitSeeqSeq", which kept
// this test out of --gtest_filter=ExplicitSeedSeq.* runs.
TEST(ExplicitSeedSeq, SeedMaterialIsForwardedIdentically) {
  const size_t kNumBlocks = 128;
  uint32_t seed_material[kNumBlocks];
  std::random_device urandom{"/dev/urandom"};
  for (uint32_t& seed : seed_material) {
    seed = urandom();
  }
  ExplicitSeedSeq seq(seed_material, &seed_material[kNumBlocks]);
  // Fewer outputs than material: a prefix comes back.
  {
    const size_t kNumGenerated = kNumBlocks / 2;
    uint32_t outputs[kNumGenerated];
    seq.generate(outputs, &outputs[kNumGenerated]);
    for (size_t i = 0; i < kNumGenerated; i++) {
      EXPECT_EQ(outputs[i], seed_material[i]);
    }
  }
  // Exactly as many outputs as material.
  {
    const size_t kNumGenerated = kNumBlocks;
    uint32_t outputs[kNumGenerated];
    seq.generate(outputs, &outputs[kNumGenerated]);
    for (size_t i = 0; i < kNumGenerated; i++) {
      EXPECT_EQ(outputs[i], seed_material[i]);
    }
  }
  // More outputs than material: the material repeats cyclically.
  {
    const size_t kNumGenerated = kNumBlocks * 2;
    uint32_t outputs[kNumGenerated];
    seq.generate(outputs, &outputs[kNumGenerated]);
    for (size_t i = 0; i < kNumGenerated; i++) {
      EXPECT_EQ(outputs[i], seed_material[i % kNumBlocks]);
    }
  }
}
// Copy construction, copy assignment, and move construction must preserve
// the generated stream; a moved-from sequence is expected to be left empty
// and therefore generate zeros.
TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
  using testing::Each;
  using testing::Eq;
  using testing::Not;
  using testing::Pointwise;
  uint32_t entropy[4];
  std::random_device urandom("/dev/urandom");
  for (uint32_t& entry : entropy) {
    entry = urandom();
  }
  ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy));
  // Copy constructor: the copy generates the same stream as the original.
  {
    ExplicitSeedSeq seq_copy(seq_from_entropy);
    EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
    std::vector<uint32_t> seeds_1(1000, 0);
    std::vector<uint32_t> seeds_2(1000, 1);
    seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
    seq_copy.generate(seeds_2.begin(), seeds_2.end());
    EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
  }
  // Copy assignment: distinct material diverges, then agrees after
  // assignment.
  {
    for (uint32_t& entry : entropy) {
      entry = urandom();
    }
    ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
    std::vector<uint32_t> seeds_1(1000, 0);
    std::vector<uint32_t> seeds_2(1000, 0);
    seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
    another_seq.generate(seeds_2.begin(), seeds_2.end());
    EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
    // Suppress -Wstringop-overflow raised by GCC >= 12 on this assignment.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
    another_seq = seq_from_entropy;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
    seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
    another_seq.generate(seeds_2.begin(), seeds_2.end());
    EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
  }
  // Move constructor: the target takes over the stream.
  {
    std::vector<uint32_t> seeds_1(1000, 0);
    seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
    absl::random_internal::ExplicitSeedSeq moved_seq(
        std::move(seq_from_entropy));
    std::vector<uint32_t> seeds_2(1000, 1);
    moved_seq.generate(seeds_2.begin(), seeds_2.end());
    EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
    // NOTE(review): the next two checks rely on std::vector's moved-from
    // state being empty — unspecified in general, but what the default
    // allocator does in practice.
    EXPECT_EQ(seq_from_entropy.size(), 0);
    seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
    EXPECT_THAT(seeds_1, Each(Eq(0)));
  }
}
TEST(ExplicitSeedSeq, StdURBGGoldenTests) {
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::minstd_rand rng(seed_sequence);
std::minstd_rand::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(579252, 43785881, 464353103, 1501811174));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937 rng(seed_sequence);
std::mt19937::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values, testing::ElementsAre(138416803, 151130212, 33817739,
138416803));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937_64 rng(seed_sequence);
std::mt19937_64::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(19738651785169348, 1464811352364190456,
18054685302720800, 19738651785169348));
}
} | ExplicitSeedSeq(const ExplicitSeedSeq& other) = default;
ExplicitSeedSeq& operator=(const ExplicitSeedSeq& other) = default;
ExplicitSeedSeq(ExplicitSeedSeq&& other) = default;
ExplicitSeedSeq& operator=(ExplicitSeedSeq&& other) = default;
template <typename Iterator>
ExplicitSeedSeq(Iterator begin, Iterator end) {
for (auto it = begin; it != end; it++) {
state_.push_back(*it & 0xffffffff);
}
} | TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
using testing::Each;
using testing::Eq;
using testing::Not;
using testing::Pointwise;
uint32_t entropy[4];
std::random_device urandom("/dev/urandom");
for (uint32_t& entry : entropy) {
entry = urandom();
}
ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy));
{
ExplicitSeedSeq seq_copy(seq_from_entropy);
EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 1);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
seq_copy.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
}
{
for (uint32_t& entry : entropy) {
entry = urandom();
}
ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
another_seq = seq_from_entropy;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
}
{
std::vector<uint32_t> seeds_1(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
absl::random_internal::ExplicitSeedSeq moved_seq(
std::move(seq_from_entropy));
std::vector<uint32_t> seeds_2(1000, 1);
moved_seq.generate(seeds_2.begin(), seeds_2.end());
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
EXPECT_EQ(seq_from_entropy.size(), 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
EXPECT_THAT(seeds_1, Each(Eq(0)));
}
} |
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel::common_internal {
namespace {
// Renders an int64 as its plain decimal string form, e.g. "-42".
std::string IntDebugString(int64_t value) { return std::to_string(value); }
}
// Returns the decimal string form of this integer for debug/trace output.
std::string IntValueBase::DebugString() const {
  return IntDebugString(NativeValue());
}
// Returns the number of bytes SerializeTo() would produce for this value
// (its google.protobuf.Int64Value encoding). The converter is unused.
absl::StatusOr<size_t> IntValueBase::GetSerializedSize(
    AnyToJsonConverter&) const {
  return internal::SerializedInt64ValueSize(NativeValue());
}
// Appends the google.protobuf.Int64Value wire encoding of this value to
// `value`. The converter argument is unused for primitive ints.
absl::Status IntValueBase::SerializeTo(AnyToJsonConverter&,
                                       absl::Cord& value) const {
  return internal::SerializeInt64Value(NativeValue(), value);
}
// Serializes this value (google.protobuf.Int64Value encoding) and returns
// the resulting bytes, or the error produced by SerializeTo().
absl::StatusOr<absl::Cord> IntValueBase::Serialize(
    AnyToJsonConverter& value_manager) const {
  absl::Cord serialized;
  CEL_RETURN_IF_ERROR(SerializeTo(value_manager, serialized));
  return serialized;
}
// Returns the google.protobuf.Any type URL for this value, formed by
// joining `prefix` with "google.protobuf.Int64Value".
absl::StatusOr<std::string> IntValueBase::GetTypeUrl(
    absl::string_view prefix) const {
  return MakeTypeUrlWithPrefix(prefix, "google.protobuf.Int64Value");
}
// Packs this value into a google.protobuf.Any: serializes the payload,
// builds the type URL from `prefix`, and combines the two.
absl::StatusOr<Any> IntValueBase::ConvertToAny(
    AnyToJsonConverter& value_manager, absl::string_view prefix) const {
  CEL_ASSIGN_OR_RETURN(auto value, Serialize(value_manager));
  CEL_ASSIGN_OR_RETURN(auto type_url, GetTypeUrl(prefix));
  return MakeAny(std::move(type_url), std::move(value));
}
// Converts this value to its JSON representation via JsonInt. The
// converter argument is unused for primitive ints.
absl::StatusOr<Json> IntValueBase::ConvertToJson(AnyToJsonConverter&) const {
  return JsonInt(NativeValue());
}
// Equality with CEL cross-type numeric semantics: int==int compares
// directly; int==double and int==uint compare by exact numeric value via
// internal::Number. Any non-numeric `other` yields false (not an error).
// The branches are ordered most-specific-first; only the first matching
// view type is consulted.
absl::Status IntValueBase::Equal(ValueManager&, ValueView other,
                                 Value& result) const {
  if (auto other_value = As<IntValueView>(other); other_value.has_value()) {
    result = BoolValueView{NativeValue() == other_value->NativeValue()};
    return absl::OkStatus();
  }
  if (auto other_value = As<DoubleValueView>(other); other_value.has_value()) {
    // Exact comparison — avoids the lossy int64 -> double cast.
    result =
        BoolValueView{internal::Number::FromInt64(NativeValue()) ==
                      internal::Number::FromDouble(other_value->NativeValue())};
    return absl::OkStatus();
  }
  if (auto other_value = As<UintValueView>(other); other_value.has_value()) {
    // Exact signed/unsigned comparison (negative ints never equal uints).
    result =
        BoolValueView{internal::Number::FromInt64(NativeValue()) ==
                      internal::Number::FromUint64(other_value->NativeValue())};
    return absl::OkStatus();
  }
  result = BoolValueView{false};
  return absl::OkStatus();
}
// Convenience overload of Equal() that returns the comparison result by
// value instead of through an out-parameter.
absl::StatusOr<Value> IntValueBase::Equal(ValueManager& value_manager,
                                          ValueView other) const {
  Value outcome;
  CEL_RETURN_IF_ERROR(Equal(value_manager, other, outcome));
  return outcome;
}
} | #include <cstdint>
#include <sstream>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using cel::internal::IsOkAndHolds;
using IntValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(IntValueTest, Kind) {
EXPECT_EQ(IntValue(1).kind(), IntValue::kKind);
EXPECT_EQ(Value(IntValue(1)).kind(), IntValue::kKind);
}
TEST_P(IntValueTest, DebugString) {
{
std::ostringstream out;
out << IntValue(1);
EXPECT_EQ(out.str(), "1");
}
{
std::ostringstream out;
out << Value(IntValue(1));
EXPECT_EQ(out.str(), "1");
}
}
TEST_P(IntValueTest, GetSerializedSize) {
EXPECT_THAT(IntValue().GetSerializedSize(value_manager()), IsOkAndHolds(0));
}
TEST_P(IntValueTest, ConvertToAny) {
EXPECT_THAT(IntValue().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.Int64Value"),
absl::Cord())));
}
TEST_P(IntValueTest, ConvertToJson) {
EXPECT_THAT(IntValue(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(IntValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(IntValue(1)), NativeTypeId::For<IntValue>());
EXPECT_EQ(NativeTypeId::Of(Value(IntValue(1))),
NativeTypeId::For<IntValue>());
}
TEST_P(IntValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<IntValue>(IntValue(1)));
EXPECT_TRUE(InstanceOf<IntValue>(Value(IntValue(1))));
}
TEST_P(IntValueTest, Cast) {
EXPECT_THAT(Cast<IntValue>(IntValue(1)), An<IntValue>());
EXPECT_THAT(Cast<IntValue>(Value(IntValue(1))), An<IntValue>());
}
TEST_P(IntValueTest, As) {
EXPECT_THAT(As<IntValue>(IntValue(1)), Ne(absl::nullopt));
EXPECT_THAT(As<IntValue>(Value(IntValue(1))), Ne(absl::nullopt));
}
TEST_P(IntValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(IntValue(1)), absl::HashOf(int64_t{1}));
}
TEST_P(IntValueTest, Equality) {
EXPECT_NE(IntValue(0), 1);
EXPECT_NE(1, IntValue(0));
EXPECT_NE(IntValue(0), IntValue(1));
}
TEST_P(IntValueTest, LessThan) {
EXPECT_LT(IntValue(0), 1);
EXPECT_LT(0, IntValue(1));
EXPECT_LT(IntValue(0), IntValue(1));
}
INSTANTIATE_TEST_SUITE_P(
IntValueTest, IntValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
IntValueTest::ToString);
using IntValueViewTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(IntValueViewTest, Kind) {
EXPECT_EQ(IntValueView(1).kind(), IntValueView::kKind);
EXPECT_EQ(ValueView(IntValueView(1)).kind(), IntValueView::kKind);
}
TEST_P(IntValueViewTest, DebugString) {
{
std::ostringstream out;
out << IntValueView(1);
EXPECT_EQ(out.str(), "1");
}
{
std::ostringstream out;
out << ValueView(IntValueView(1));
EXPECT_EQ(out.str(), "1");
}
}
TEST_P(IntValueViewTest, GetSerializedSize) {
EXPECT_THAT(IntValueView().GetSerializedSize(value_manager()),
IsOkAndHolds(0));
}
TEST_P(IntValueViewTest, ConvertToAny) {
EXPECT_THAT(IntValueView().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.Int64Value"),
absl::Cord())));
}
TEST_P(IntValueViewTest, ConvertToJson) {
EXPECT_THAT(IntValueView(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(IntValueViewTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(IntValueView(1)),
NativeTypeId::For<IntValueView>());
EXPECT_EQ(NativeTypeId::Of(ValueView(IntValueView(1))),
NativeTypeId::For<IntValueView>());
}
TEST_P(IntValueViewTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<IntValueView>(IntValueView(1)));
EXPECT_TRUE(InstanceOf<IntValueView>(ValueView(IntValueView(1))));
}
TEST_P(IntValueViewTest, Cast) {
EXPECT_THAT(Cast<IntValueView>(IntValueView(1)), An<IntValueView>());
EXPECT_THAT(Cast<IntValueView>(ValueView(IntValueView(1))),
An<IntValueView>());
}
TEST_P(IntValueViewTest, As) {
EXPECT_THAT(As<IntValueView>(IntValueView(1)), Ne(absl::nullopt));
EXPECT_THAT(As<IntValueView>(ValueView(IntValueView(1))), Ne(absl::nullopt));
}
TEST_P(IntValueViewTest, HashValue) {
EXPECT_EQ(absl::HashOf(IntValueView(1)), absl::HashOf(int64_t{1}));
}
TEST_P(IntValueViewTest, Equality) {
EXPECT_NE(IntValueView(IntValue(0)), 1);
EXPECT_NE(1, IntValueView(0));
EXPECT_NE(IntValueView(0), IntValueView(1));
EXPECT_NE(IntValueView(0), IntValue(1));
EXPECT_NE(IntValue(1), IntValueView(0));
}
TEST_P(IntValueViewTest, LessThan) {
EXPECT_LT(IntValueView(0), 1);
EXPECT_LT(0, IntValueView(1));
EXPECT_LT(IntValueView(0), IntValueView(1));
EXPECT_LT(IntValueView(0), IntValue(1));
EXPECT_LT(IntValue(0), IntValueView(1));
}
INSTANTIATE_TEST_SUITE_P(
IntValueViewTest, IntValueViewTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
IntValueViewTest::ToString);
}
} | absl::StatusOr<std::string> IntValueBase::GetTypeUrl(
absl::string_view prefix) const {
return MakeTypeUrlWithPrefix(prefix, "google.protobuf.Int64Value");
} | TEST_P(IntValueTest, ConvertToAny) {
EXPECT_THAT(IntValue().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.Int64Value"),
absl::Cord())));
}
TEST_P(IntValueViewTest, ConvertToAny) {
EXPECT_THAT(IntValueView().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.Int64Value"),
absl::Cord())));
} |
#include "absl/strings/numbers.h"
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <limits>
#include <system_error>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/ascii.h"
#include "absl/strings/charconv.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Parses `str` as a float. Returns true and stores the result in *out on
// success; returns false (leaving *out == 0) on a syntax error or if any
// non-whitespace characters remain unconsumed. Surrounding ASCII
// whitespace and a single leading '+' are tolerated.
bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out) {
  *out = 0.0;
  str = StripAsciiWhitespace(str);
  // from_chars-style parsing does not accept a leading '+', so strip it
  // here; explicitly reject "+-..." so it is not parsed as a plain "-".
  if (!str.empty() && str[0] == '+') {
    str.remove_prefix(1);
    if (!str.empty() && str[0] == '-') {
      return false;
    }
  }
  auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
  if (result.ec == std::errc::invalid_argument) {
    return false;
  }
  if (result.ptr != str.data() + str.size()) {
    // Trailing characters were not consumed: reject.
    return false;
  }
  // On result_out_of_range, map overflow to +/-infinity; |*out| <= 1
  // indicates underflow, whose tiny/zero result is kept as-is.
  if (result.ec == std::errc::result_out_of_range) {
    if (*out > 1.0) {
      *out = std::numeric_limits<float>::infinity();
    } else if (*out < -1.0) {
      *out = -std::numeric_limits<float>::infinity();
    }
  }
  return true;
}
// Double-precision counterpart of SimpleAtof; identical semantics:
// tolerates surrounding whitespace and a single leading '+', rejects
// trailing garbage, and clamps overflow to +/-infinity.
bool SimpleAtod(absl::string_view str, absl::Nonnull<double*> out) {
  *out = 0.0;
  str = StripAsciiWhitespace(str);
  // Strip an optional leading '+', but reject "+-...".
  if (!str.empty() && str[0] == '+') {
    str.remove_prefix(1);
    if (!str.empty() && str[0] == '-') {
      return false;
    }
  }
  auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
  if (result.ec == std::errc::invalid_argument) {
    return false;
  }
  if (result.ptr != str.data() + str.size()) {
    // Trailing characters were not consumed: reject.
    return false;
  }
  // Overflow becomes +/-infinity; underflow (|*out| <= 1) is kept as-is.
  if (result.ec == std::errc::result_out_of_range) {
    if (*out > 1.0) {
      *out = std::numeric_limits<double>::infinity();
    } else if (*out < -1.0) {
      *out = -std::numeric_limits<double>::infinity();
    }
  }
  return true;
}
// Parses a boolean from its common textual spellings, case-insensitively.
// Accepts "true"/"t"/"yes"/"y"/"1" (-> true) and "false"/"f"/"no"/"n"/"0"
// (-> false); anything else returns false with *out untouched.
bool SimpleAtob(absl::string_view str, absl::Nonnull<bool*> out) {
  ABSL_RAW_CHECK(out != nullptr, "Output pointer must not be nullptr.");
  static constexpr absl::string_view kTrueSpellings[] = {"true", "t", "yes",
                                                         "y", "1"};
  static constexpr absl::string_view kFalseSpellings[] = {"false", "f", "no",
                                                          "n", "0"};
  for (absl::string_view spelling : kTrueSpellings) {
    if (EqualsIgnoreCase(str, spelling)) {
      *out = true;
      return true;
    }
  }
  for (absl::string_view spelling : kFalseSpellings) {
    if (EqualsIgnoreCase(str, spelling)) {
      *out = false;
      return true;
    }
  }
  return false;
}
namespace {
// Two/four/eight bytes of ASCII '0', used to bias packed per-byte digit
// values into their ASCII character codes with a single addition.
constexpr uint32_t kTwoZeroBytes = 0x0101 * '0';
constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
// Reciprocal-multiply division: for the small operand ranges used in this
// file, x / 10 == (x * 103) >> 10 and x / 100 == (x * 10486) >> 20.
constexpr uint64_t kDivisionBy10Mul = 103u;
constexpr uint64_t kDivisionBy10Div = 1 << 10;
constexpr uint64_t kDivisionBy100Mul = 10486u;
constexpr uint64_t kDivisionBy100Div = 1 << 20;
// Writes `n` (must be < 100) as one or two ASCII digits at out_str and
// returns the position just past the last digit. Always stores two bytes,
// so the caller must have room for both even for single-digit n.
inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
  // -1 when n < 10 (arithmetic shift of a negative value), 0 otherwise.
  int num_digits = static_cast<int>(n - 10) >> 8;
  uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
  uint32_t mod10 = n - 10u * div10;
  // Tens digit in the low byte, ones digit in the next byte; a
  // little-endian store then puts the tens digit first in memory.
  uint32_t base = kTwoZeroBytes + div10 + (mod10 << 8);
  // For single-digit n, shift out the leading '0' byte.
  base >>= num_digits & 8;
  little_endian::Store16(out_str, static_cast<uint16_t>(base));
  return out_str + 2 + num_digits;
}
// Writes `n` (< 10000; callers guarantee n != 0, per the ABSL_ASSUME) as
// 1-4 ASCII digits at out_str and returns the position just past the last
// digit. Always stores four bytes, so the caller must have room for four.
inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) {
  // Split n into two digit pairs: n == div100 * 100 + mod100.
  uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
  uint32_t mod100 = n - 100ull * div100;
  // Pack both pairs 16 bits apart so one multiply splits both into
  // tens/ones; afterwards `tens` holds one digit value per byte, most
  // significant digit in the lowest byte.
  uint32_t hundreds = (mod100 << 16) + div100;
  uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
  tens &= (0xFull << 16) | 0xFull;
  tens += (hundreds - 10ull * tens) << 8;
  ABSL_ASSUME(tens != 0);
  // Trailing zero BITS, rounded down to whole bytes (& ~7): each zero low
  // byte is a leading decimal zero to strip.
  uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8u);
  tens += kFourZeroBytes;  // bias digit values into ASCII
  tens >>= zeroes;
  little_endian::Store32(out_str, tens);
  return out_str + sizeof(tens) - zeroes / 8;
}
// Expands i (0 <= i < 10**8) into eight bytes, one decimal digit value per
// byte, ordered so a little-endian 64-bit store emits the most significant
// digit first. Digit bytes are NOT ASCII-biased; callers add
// kEightZeroBytes before storing.
inline uint64_t PrepareEightDigits(uint32_t i) {
  ABSL_ASSUME(i < 10000'0000);
  // Split into two 4-digit halves, then process both halves in parallel in
  // one 64-bit word using the same reciprocal multiplies as
  // EncodeTenThousand.
  uint32_t hi = i / 10000;
  uint32_t lo = i % 10000;
  uint64_t merged = hi | (uint64_t{lo} << 32);
  uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
                    ((0x7Full << 32) | 0x7Full);
  uint64_t mod100 = merged - 100ull * div100;
  uint64_t hundreds = (mod100 << 16) + div100;
  uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
  tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
  tens += (hundreds - 10ull * tens) << 8;
  return tens;
}
// Writes the full decimal representation of `n` (no terminating NUL) and
// returns the position just past the last digit. May store up to 8 bytes
// beyond the digits actually used, so callers must provide slack.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32(
    uint32_t n, absl::Nonnull<char*> out_str) {
  if (n < 10) {
    // Single digit: avoid the wide stores below.
    *out_str = static_cast<char>('0' + n);
    return out_str + 1;
  }
  if (n < 100'000'000) {
    // 2-8 digits: pack digit bytes, strip leading zero bytes (trailing
    // zero bits rounded down to bytes), bias into ASCII, one 64-bit store.
    uint64_t bottom = PrepareEightDigits(n);
    ABSL_ASSUME(bottom != 0);
    uint32_t zeroes =
        static_cast<uint32_t>(absl::countr_zero(bottom)) & (0 - 8u);
    little_endian::Store64(out_str, (bottom + kEightZeroBytes) >> zeroes);
    return out_str + sizeof(bottom) - zeroes / 8;
  }
  // 9-10 digits: 1-2 leading digits, then exactly eight more.
  uint32_t div08 = n / 100'000'000;
  uint32_t mod08 = n % 100'000'000;
  uint64_t bottom = PrepareEightDigits(mod08) + kEightZeroBytes;
  out_str = EncodeHundred(div08, out_str);
  little_endian::Store64(out_str, bottom);
  return out_str + sizeof(bottom);
}
// Writes the full decimal representation of `i` (no terminating NUL) and
// returns the position just past the last digit. Emits fixed 8-digit
// groups after a variable-width head so the bulk of the work uses
// PrepareEightDigits' branchless path.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* EncodeFullU64(uint64_t i,
                                                        char* buffer) {
  if (i <= std::numeric_limits<uint32_t>::max()) {
    return EncodeFullU32(static_cast<uint32_t>(i), buffer);
  }
  uint32_t mod08;
  if (i < 1'0000'0000'0000'0000ull) {
    // 10-16 digits: variable head, then exactly eight more digits.
    uint32_t div08 = static_cast<uint32_t>(i / 100'000'000ull);
    mod08 = static_cast<uint32_t>(i % 100'000'000ull);
    buffer = EncodeFullU32(div08, buffer);
  } else {
    // 17-20 digits: 1-4 leading digits, then two fixed 8-digit groups.
    uint64_t div08 = i / 100'000'000ull;
    mod08 = static_cast<uint32_t>(i % 100'000'000ull);
    uint32_t div016 = static_cast<uint32_t>(div08 / 100'000'000ull);
    uint32_t div08mod08 = static_cast<uint32_t>(div08 % 100'000'000ull);
    uint64_t mid_result = PrepareEightDigits(div08mod08) + kEightZeroBytes;
    buffer = EncodeTenThousand(div016, buffer);
    little_endian::Store64(buffer, mid_result);
    buffer += sizeof(mid_result);
  }
  // Final 8-digit group, always written in full.
  uint64_t mod_result = PrepareEightDigits(mod08) + kEightZeroBytes;
  little_endian::Store64(buffer, mod_result);
  return buffer + sizeof(mod_result);
}
}
// Writes exactly two ASCII digits ("00".."99") for i < 100; no terminator.
void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
  assert(i < 100);
  uint32_t base = kTwoZeroBytes;
  // Reciprocal-multiply division by 10 (exact for i < 100).
  uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
  uint32_t mod10 = i - 10u * div10;
  // Tens digit in the low byte, ones in the high byte; the little-endian
  // store puts the tens digit first in memory.
  base += div10 + (mod10 << 8);
  little_endian::Store16(buf, static_cast<uint16_t>(base));
}
// Writes `n` in decimal followed by a NUL terminator; returns a pointer to
// the NUL (i.e. the end of the number).
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
    uint32_t n, absl::Nonnull<char*> out_str) {
  out_str = EncodeFullU32(n, out_str);
  *out_str = '\0';
  return out_str;
}
// Writes `i` in decimal (with a '-' prefix when negative) followed by a
// NUL terminator; returns a pointer to the NUL.
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
    int32_t i, absl::Nonnull<char*> buffer) {
  uint32_t u = static_cast<uint32_t>(i);
  if (i < 0) {
    *buffer++ = '-';
    // Negate in unsigned arithmetic: well-defined even for INT32_MIN.
    u = 0 - u;
  }
  buffer = EncodeFullU32(u, buffer);
  *buffer = '\0';
  return buffer;
}
// Writes `i` in decimal followed by a NUL terminator; returns a pointer to
// the NUL.
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
    uint64_t i, absl::Nonnull<char*> buffer) {
  buffer = EncodeFullU64(i, buffer);
  *buffer = '\0';
  return buffer;
}
// Writes `i` in decimal (with a '-' prefix when negative) followed by a
// NUL terminator; returns a pointer to the NUL.
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
    int64_t i, absl::Nonnull<char*> buffer) {
  uint64_t u = static_cast<uint64_t>(i);
  if (i < 0) {
    *buffer++ = '-';
    // Negate in unsigned arithmetic: well-defined even for INT64_MIN.
    u = 0 - u;
  }
  buffer = EncodeFullU64(u, buffer);
  *buffer = '\0';
  return buffer;
}
// Multiplies the 128-bit quantity {num.first = high 64, num.second = low
// 64} by the 32-bit `mul` using four 32x32->64 partial products. If the
// true product needs more than 128 bits, the result is right-shifted to
// fit and low-order bits are discarded; the sole caller (PowFive)
// renormalizes and only compares magnitudes, so the lost scale/precision
// does not affect its use.
static std::pair<uint64_t, uint64_t> Mul32(std::pair<uint64_t, uint64_t> num,
                                           uint32_t mul) {
  uint64_t bits0_31 = num.second & 0xFFFFFFFF;
  uint64_t bits32_63 = num.second >> 32;
  uint64_t bits64_95 = num.first & 0xFFFFFFFF;
  uint64_t bits96_127 = num.first >> 32;
  // Each partial product fits in 64 bits (32-bit x 32-bit).
  bits0_31 *= mul;
  bits32_63 *= mul;
  bits64_95 *= mul;
  bits96_127 *= mul;
  // Recombine, propagating carries: (x < addend) detects unsigned wrap.
  uint64_t bits0_63 = bits0_31 + (bits32_63 << 32);
  uint64_t bits64_127 = bits64_95 + (bits96_127 << 32) + (bits32_63 >> 32) +
                        (bits0_63 < bits0_31);
  uint64_t bits128_up = (bits96_127 >> 32) + (bits64_127 < bits64_95);
  if (bits128_up == 0) return {bits64_127, bits0_63};
  // Product overflowed 128 bits: shift right just enough to fit.
  auto shift = static_cast<unsigned>(bit_width(bits128_up));
  uint64_t lo = (bits0_63 >> shift) + (bits64_127 << (64 - shift));
  uint64_t hi = (bits64_127 >> shift) + (bits128_up << (64 - shift));
  return {hi, lo};
}
// Computes num * 5**expfive as a 128-bit {high, low} pair, then normalizes
// by shifting left until the top bit of .first is set (for nonzero num).
// The binary exponent is discarded: SplitToSix only compares two PowFive
// results that represent nearly equal quantities, so normalization alone
// suffices there. `num` must be nonzero (countl_zero/shift below assume a
// nonzero high word after normalization).
static std::pair<uint64_t, uint64_t> PowFive(uint64_t num, int expfive) {
  // Start with num in the high word, i.e. already scaled up by 2**64.
  std::pair<uint64_t, uint64_t> result = {num, 0};
  // Peel off factors of 5**13, the largest power of five fitting in 32
  // bits.
  while (expfive >= 13) {
    result = Mul32(result, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5);
    expfive -= 13;
  }
  constexpr uint32_t powers_of_five[13] = {
      1,
      5,
      5 * 5,
      5 * 5 * 5,
      5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
      5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5};
  // After the loop expfive < 13, so `& 15` never changes the index; it
  // just hands the compiler a tight bound.
  result = Mul32(result, powers_of_five[expfive & 15]);
  // Left-normalize so the most significant bit of .first is set.
  int shift = countl_zero(result.first);
  if (shift != 0) {
    result.first = (result.first << shift) + (result.second >> (64 - shift));
    result.second = (result.second << shift);
  }
  return result;
}
// Result of SplitToSix: value ~= digits[0].digits[1..5] * 10**exponent.
// `digits` holds six ASCII digits and is NOT NUL-terminated.
struct ExpDigits {
  int32_t exponent;
  char digits[6];
};
// Splits a positive, finite `value` into six decimal digits and a decimal
// exponent so that value ~= d.ddddd * 10**exponent, rounded to nearest
// with exact ties broken to even.
static ExpDigits SplitToSix(const double value) {
  ExpDigits exp_dig;
  int exp = 5;
  double d = value;
  // Scale d into roughly [1e5, 1e6) by powers of ten, adjusting exp to
  // compensate; each ladder halves the remaining exponent range.
  if (d >= 999999.5) {
    if (d >= 1e+261) exp += 256, d *= 1e-256;
    if (d >= 1e+133) exp += 128, d *= 1e-128;
    if (d >= 1e+69) exp += 64, d *= 1e-64;
    if (d >= 1e+37) exp += 32, d *= 1e-32;
    if (d >= 1e+21) exp += 16, d *= 1e-16;
    if (d >= 1e+13) exp += 8, d *= 1e-8;
    if (d >= 1e+9) exp += 4, d *= 1e-4;
    if (d >= 1e+7) exp += 2, d *= 1e-2;
    if (d >= 1e+6) exp += 1, d *= 1e-1;
  } else {
    if (d < 1e-250) exp -= 256, d *= 1e256;
    if (d < 1e-122) exp -= 128, d *= 1e128;
    if (d < 1e-58) exp -= 64, d *= 1e64;
    if (d < 1e-26) exp -= 32, d *= 1e32;
    if (d < 1e-10) exp -= 16, d *= 1e16;
    if (d < 1e-2) exp -= 8, d *= 1e8;
    if (d < 1e+2) exp -= 4, d *= 1e4;
    if (d < 1e+4) exp -= 2, d *= 1e2;
    if (d < 1e+5) exp -= 1, d *= 1e1;
  }
  // Fixed-point view with 16 fractional bits: d64k = d * 2**16.
  uint64_t d64k = d * 65536;
  uint32_t dddddd;
  if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) {
    // The fractional part is within 1/65536 of exactly one half, so the
    // scaled double cannot decide the rounding direction. Redo the
    // comparison exactly: compare the original mantissa against the
    // halfway point (2*dddddd + 1) using 128-bit integer arithmetic.
    dddddd = static_cast<uint32_t>(d64k / 65536);
    int exp2;
    double m = std::frexp(value, &exp2);
    uint64_t mantissa = m * (32768.0 * 65536.0 * 65536.0 * 65536.0);
    mantissa <<= 1;
    exp2 -= 64;
    std::pair<uint64_t, uint64_t> edge, val;
    if (exp >= 6) {
      // value and the halfway point differ by a factor of 10**(exp-5);
      // fold that power of five into the edge.
      edge = PowFive(2 * dddddd + 1, exp - 5);
      val.first = mantissa;
      val.second = 0;
    } else {
      edge = PowFive(2 * dddddd + 1, 0);
      val = PowFive(mantissa, 5 - exp);
    }
    if (val > edge) {
      dddddd++;
    } else if (val == edge) {
      // Exact tie: round half to even.
      dddddd += (dddddd & 1);
    }
  } else {
    // Safely away from a tie: plain round-to-nearest.
    dddddd = static_cast<uint32_t>((d64k + 32768) / 65536);
  }
  if (dddddd == 1000000) {
    // Rounding carried into a seventh digit: renormalize to 1.00000e+1.
    dddddd = 100000;
    exp += 1;
  }
  exp_dig.exponent = exp;
  // Emit the six digits as ASCII, two at a time.
  uint32_t two_digits = dddddd / 10000;
  dddddd -= two_digits * 10000;
  numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[0]);
  two_digits = dddddd / 100;
  dddddd -= two_digits * 100;
  numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[2]);
  numbers_internal::PutTwoDigits(dddddd, &exp_dig.digits[4]);
  return exp_dig;
}
// Formats `d` with six significant digits, %g-style: fixed notation for
// exponents in [-4, 5], scientific ("d.dddddE+xx") otherwise; trailing
// zeros (and a trailing '.') are trimmed. Writes a NUL terminator and
// returns the length, excluding the NUL. NaN is printed unsigned as
// "nan"; infinities as "inf"/"-inf".
size_t numbers_internal::SixDigitsToBuffer(double d,
                                           absl::Nonnull<char*> const buffer) {
  static_assert(std::numeric_limits<float>::is_iec559,
                "IEEE-754/IEC-559 support only");
  char* out = buffer;
  if (std::isnan(d)) {
    strcpy(out, "nan");
    return 3;
  }
  if (d == 0) {
    // Preserve the sign of negative zero.
    if (std::signbit(d)) *out++ = '-';
    *out++ = '0';
    *out = 0;
    return static_cast<size_t>(out - buffer);
  }
  if (d < 0) {
    *out++ = '-';
    d = -d;
  }
  // Only +infinity remains greater than max() here (NaN handled above).
  if (d > std::numeric_limits<double>::max()) {
    strcpy(out, "inf");
    return static_cast<size_t>(out + 3 - buffer);
  }
  auto exp_dig = SplitToSix(d);
  int exp = exp_dig.exponent;
  const char* digits = exp_dig.digits;
  // Pre-seed "0." — used by the exp in [-4, -1] cases and asserted on by
  // the scientific path below.
  out[0] = '0';
  out[1] = '.';
  switch (exp) {
    case 5:
      // dddddd — a six-digit integer, no decimal point.
      memcpy(out, &digits[0], 6), out += 6;
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case 4:
      // ddddd[.d]
      memcpy(out, &digits[0], 5), out += 5;
      if (digits[5] != '0') {
        *out++ = '.';
        *out++ = digits[5];
      }
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case 3:
      // dddd[.d[d]] — the OR of two ASCII digits equals '0' iff both are.
      memcpy(out, &digits[0], 4), out += 4;
      if ((digits[5] | digits[4]) != '0') {
        *out++ = '.';
        *out++ = digits[4];
        if (digits[5] != '0') *out++ = digits[5];
      }
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case 2:
      // ddd.ddd with trailing zeros (and a bare '.') trimmed.
      memcpy(out, &digits[0], 3), out += 3;
      *out++ = '.';
      memcpy(out, &digits[3], 3);
      out += 3;
      while (out[-1] == '0') --out;
      if (out[-1] == '.') --out;
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case 1:
      // dd.dddd, trimmed.
      memcpy(out, &digits[0], 2), out += 2;
      *out++ = '.';
      memcpy(out, &digits[2], 4);
      out += 4;
      while (out[-1] == '0') --out;
      if (out[-1] == '.') --out;
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case 0:
      // d.ddddd, trimmed.
      memcpy(out, &digits[0], 1), out += 1;
      *out++ = '.';
      memcpy(out, &digits[1], 5);
      out += 5;
      while (out[-1] == '0') --out;
      if (out[-1] == '.') --out;
      *out = 0;
      return static_cast<size_t>(out - buffer);
    case -4:
      // 0.000ddddd..: each fall-through inserts one more leading zero
      // after the pre-seeded "0.".
      out[2] = '0';
      ++out;
      ABSL_FALLTHROUGH_INTENDED;
    case -3:
      out[2] = '0';
      ++out;
      ABSL_FALLTHROUGH_INTENDED;
    case -2:
      out[2] = '0';
      ++out;
      ABSL_FALLTHROUGH_INTENDED;
    case -1:
      // 0.dddddd, trailing zeros trimmed (never a bare '.': d != 0 here).
      out += 2;
      memcpy(out, &digits[0], 6);
      out += 6;
      while (out[-1] == '0') --out;
      *out = 0;
      return static_cast<size_t>(out - buffer);
  }
  // Scientific notation: d[.ddddd]e±xx[x].
  assert(exp < -4 || exp >= 6);
  out[0] = digits[0];
  assert(out[1] == '.');
  out += 2;
  memcpy(out, &digits[1], 5), out += 5;
  while (out[-1] == '0') --out;
  if (out[-1] == '.') --out;
  *out++ = 'e';
  if (exp > 0) {
    *out++ = '+';
  } else {
    *out++ = '-';
    exp = -exp;
  }
  if (exp > 99) {
    // Three-digit exponent: emit the hundreds digit first.
    int dig1 = exp / 100;
    exp -= dig1 * 100;
    *out++ = '0' + static_cast<char>(dig1);
  }
  PutTwoDigits(static_cast<uint32_t>(exp), out);
  out += 2;
  *out = 0;
  return static_cast<size_t>(out - buffer);
}
namespace {
static const int8_t kAsciiToInt[256] = {
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 36, 36, 36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36};
// Consumes surrounding whitespace, an optional sign, and a base prefix
// from *text. On success: *text holds the remaining digit characters,
// *base_ptr the resolved base (2..36), and *negative_ptr whether a '-'
// was seen; returns false for null/empty input, an out-of-range base, or
// a sign/prefix with no digits after it. With base 0, the base is
// inferred C-style: "0x"/"0X" -> 16, leading "0" -> 8, otherwise 10.
inline bool safe_parse_sign_and_base(
    absl::Nonnull<absl::string_view*> text /*inout*/,
    absl::Nonnull<int*> base_ptr /*inout*/,
    absl::Nonnull<bool*> negative_ptr /*output*/) {
  if (text->data() == nullptr) {
    return false;
  }
  const char* start = text->data();
  const char* end = start + text->size();
  int base = *base_ptr;
  // Trim leading and trailing ASCII whitespace.
  while (start < end &&
         absl::ascii_isspace(static_cast<unsigned char>(start[0]))) {
    ++start;
  }
  while (start < end &&
         absl::ascii_isspace(static_cast<unsigned char>(end[-1]))) {
    --end;
  }
  if (start >= end) {
    return false;
  }
  // Consume an optional '+' or '-'; digits must follow.
  *negative_ptr = (start[0] == '-');
  if (*negative_ptr || start[0] == '+') {
    ++start;
    if (start >= end) {
      return false;
    }
  }
  if (base == 0) {
    // Infer the base from the prefix, C-style.
    if (end - start >= 2 && start[0] == '0' &&
        (start[1] == 'x' || start[1] == 'X')) {
      base = 16;
      start += 2;
      if (start >= end) {
        // "0x" with no digits after it is invalid.
        return false;
      }
    } else if (end - start >= 1 && start[0] == '0') {
      base = 8;
      start += 1;
    } else {
      base = 10;
    }
  } else if (base == 16) {
    // An explicit hex base still permits an optional "0x"/"0X" prefix.
    if (end - start >= 2 && start[0] == '0' &&
        (start[1] == 'x' || start[1] == 'X')) {
      start += 2;
      if (start >= end) {
        return false;
      }
    }
  } else if (base >= 2 && base <= 36) {
    // Any other base in [2, 36] is accepted as-is, no prefix handling.
  } else {
    return false;
  }
  *text = absl::string_view(start, static_cast<size_t>(end - start));
  *base_ptr = base;
  return true;
}
// Per-IntType tables of precomputed quotients used for overflow detection
// while accumulating digits: kVmaxOverBase[b] holds max(IntType)/b and
// kVminOverBase[b] holds min(IntType)/b for bases 2..36. Indices 0 and 1 are
// unused placeholders.
template <typename IntType>
struct LookupTables {
  ABSL_CONST_INIT static const IntType kVmaxOverBase[];
  ABSL_CONST_INIT static const IntType kVminOverBase[];
};
// Expands to an array initializer {0, 0, X / 2, X / 3, ..., X / 36}:
// entry i holds X divided by base i (entries 0 and 1 are placeholders).
#define X_OVER_BASE_INITIALIZER(X) \
  { \
    0, 0, X / 2, X / 3, X / 4, X / 5, X / 6, X / 7, X / 8, X / 9, X / 10, \
        X / 11, X / 12, X / 13, X / 14, X / 15, X / 16, X / 17, X / 18, \
        X / 19, X / 20, X / 21, X / 22, X / 23, X / 24, X / 25, X / 26, \
        X / 27, X / 28, X / 29, X / 30, X / 31, X / 32, X / 33, X / 34, \
        X / 35, X / 36, \
  }
// Explicit specialization for uint128: entry b holds the maximum uint128
// value divided by b, for b in [2, 36] (entries 0 and 1 are unused). Spelled
// out as MakeUint128(high, low) literals rather than via
// X_OVER_BASE_INITIALIZER; e.g. entry 2 is 2^127 - 1 and entry 4 is 2^126 - 1.
template <>
ABSL_CONST_INIT const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
    0,
    0,
    MakeUint128(9223372036854775807u, 18446744073709551615u),
    MakeUint128(6148914691236517205u, 6148914691236517205u),
    MakeUint128(4611686018427387903u, 18446744073709551615u),
    MakeUint128(3689348814741910323u, 3689348814741910323u),
    MakeUint128(3074457345618258602u, 12297829382473034410u),
    MakeUint128(2635249153387078802u, 5270498306774157604u),
    MakeUint128(2305843009213693951u, 18446744073709551615u),
    MakeUint128(2049638230412172401u, 14347467612885206812u),
    MakeUint128(1844674407370955161u, 11068046444225730969u),
    MakeUint128(1676976733973595601u, 8384883669867978007u),
    MakeUint128(1537228672809129301u, 6148914691236517205u),
    MakeUint128(1418980313362273201u, 4256940940086819603u),
    MakeUint128(1317624576693539401u, 2635249153387078802u),
    MakeUint128(1229782938247303441u, 1229782938247303441u),
    MakeUint128(1152921504606846975u, 18446744073709551615u),
    MakeUint128(1085102592571150095u, 1085102592571150095u),
    MakeUint128(1024819115206086200u, 16397105843297379214u),
    MakeUint128(970881267037344821u, 16504981539634861972u),
    MakeUint128(922337203685477580u, 14757395258967641292u),
    MakeUint128(878416384462359600u, 14054662151397753612u),
    MakeUint128(838488366986797800u, 13415813871788764811u),
    MakeUint128(802032351030850070u, 4812194106185100421u),
    MakeUint128(768614336404564650u, 12297829382473034410u),
    MakeUint128(737869762948382064u, 11805916207174113034u),
    MakeUint128(709490156681136600u, 11351842506898185609u),
    MakeUint128(683212743470724133u, 17080318586768103348u),
    MakeUint128(658812288346769700u, 10540996613548315209u),
    MakeUint128(636094623231363848u, 15266270957552732371u),
    MakeUint128(614891469123651720u, 9838263505978427528u),
    MakeUint128(595056260442243600u, 9520900167075897608u),
    MakeUint128(576460752303423487u, 18446744073709551615u),
    MakeUint128(558992244657865200u, 8943875914525843207u),
    MakeUint128(542551296285575047u, 9765923333140350855u),
    MakeUint128(527049830677415760u, 8432797290838652167u),
    MakeUint128(512409557603043100u, 8198552921648689607u),
}; | #include "absl/strings/numbers.h"
#include <sys/types.h>
#include <cfenv>
#include <cfloat>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ios>
#include <limits>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/numeric/int128.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/strings/internal/numbers_test_common.h"
#include "absl/strings/internal/ostringstream.h"
#include "absl/strings/internal/pow10_helper.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace {
using absl::SimpleAtoi;
using absl::SimpleHexAtoi;
using absl::numbers_internal::kSixDigitsToBufferSize;
using absl::numbers_internal::safe_strto32_base;
using absl::numbers_internal::safe_strto64_base;
using absl::numbers_internal::safe_strtou32_base;
using absl::numbers_internal::safe_strtou64_base;
using absl::numbers_internal::SixDigitsToBuffer;
using absl::strings_internal::Itoa;
using absl::strings_internal::strtouint32_test_cases;
using absl::strings_internal::strtouint64_test_cases;
using testing::Eq;
using testing::MatchesRegex;
using testing::Pointee;
// Sample count for the randomized floating-point tests; presumably consumed
// by stress tests elsewhere in this file — confirm against the full file.
const int kFloatNumCases = 5000000;
// Reference ("perfect") double-to-decimal conversion: produces the exact
// decimal expansion of `d` via big-decimal digit arithmetic, used to validate
// faster conversion routines.
std::string PerfectDtoa(double d) {
  if (d == 0) return "0";
  if (d < 0) return "-" + PerfectDtoa(-d);

  // Decompose d into an integer mantissa and a binary exponent.
  int64_t mantissa;
  int64_t exp = 0;
  while (d >= 1ULL << 63) {
    ++exp;
    d *= 0.5;
  }
  while ((mantissa = d) != d) {
    --exp;
    d *= 2.0;
  }

  // Render the mantissa as decimal digits, right-aligned in a wide buffer.
  constexpr int maxlen = 1100;
  char digits[maxlen + 5];
  int64_t rest = mantissa;
  for (int64_t pos = maxlen; --pos >= 0;) {
    digits[pos] = static_cast<char>('0' + (rest % 10));
    rest /= 10;
  }

  // Apply the binary exponent one bit at a time: double the digit string for
  // each positive step; multiply by 5 (halving with the decimal point shifted)
  // for each negative step.
  char* begin = &digits[0];
  char* end = digits + maxlen;
  for (int64_t step = 0; step != exp; step += (exp > 0) ? 1 : -1) {
    int carry = 0;
    for (char* p = end; --p != begin;) {
      int dig = (*p - '0') * (exp > 0 ? 2 : 5) + carry;
      carry = dig / 10;
      *p = static_cast<char>('0' + dig % 10);
    }
  }
  if (exp < 0) {
    // Shift the fractional digits and insert the decimal point.
    memmove(end + 1 + exp, end + exp, 1 - exp);
    end[exp] = '.';
    ++end;
  }

  // Strip leading zeros, but keep a single one ahead of a decimal point.
  while (*begin == '0' && begin[1] != '.') ++begin;
  return {begin, end};
}
// Spot-checks PerfectDtoa against known exact decimal expansions, then
// verifies that the produced string round-trips through strtod back to the
// original double across a range of magnitudes.
TEST(ToString, PerfectDtoa) {
  EXPECT_THAT(PerfectDtoa(1), Eq("1"));
  // The exact binary value nearest 0.1.
  EXPECT_THAT(PerfectDtoa(0.1),
              Eq("0.1000000000000000055511151231257827021181583404541015625"));
  EXPECT_THAT(PerfectDtoa(1e24), Eq("999999999999999983222784"));
  // Smallest positive subnormal double.
  EXPECT_THAT(PerfectDtoa(5e-324), MatchesRegex("0.0000.*625"));
  for (int i = 0; i < 100; ++i) {
    for (double multiplier :
         {1e-300, 1e-200, 1e-100, 0.1, 1.0, 10.0, 1e100, 1e300}) {
      double d = multiplier * i;
      std::string s = PerfectDtoa(d);
      EXPECT_DOUBLE_EQ(d, strtod(s.c_str(), nullptr));
    }
  }
}
// Minimal wrapper around a built-in integer, used to exercise the generic
// (user-defined integer type) overload of FastIntToBuffer. Forwards
// arithmetic and comparison operators to the wrapped value.
template <typename integer>
struct MyInteger {
  integer i;
  explicit constexpr MyInteger(integer i) : i(i) {}
  constexpr operator integer() const { return i; }
  constexpr MyInteger operator+(MyInteger other) const { return i + other.i; }
  constexpr MyInteger operator-(MyInteger other) const { return i - other.i; }
  constexpr MyInteger operator*(MyInteger other) const { return i * other.i; }
  constexpr MyInteger operator/(MyInteger other) const { return i / other.i; }
  constexpr bool operator<(MyInteger other) const { return i < other.i; }
  constexpr bool operator<=(MyInteger other) const { return i <= other.i; }
  constexpr bool operator==(MyInteger other) const { return i == other.i; }
  constexpr bool operator>=(MyInteger other) const { return i >= other.i; }
  constexpr bool operator>(MyInteger other) const { return i > other.i; }
  constexpr bool operator!=(MyInteger other) const { return i != other.i; }
  integer as_integer() const { return i; }
};
typedef MyInteger<int64_t> MyInt64;
typedef MyInteger<uint64_t> MyUInt64;
// Formats `x` with FastIntToBuffer and verifies the characters up to the
// returned end pointer match std::to_string. Runs the call twice to cover
// repeated use of the same buffer.
void CheckInt32(int32_t x) {
  char buffer[absl::numbers_internal::kFastToBufferSize];
  char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
  std::string expected = std::to_string(x);
  EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x;
  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
  EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x;
}
// Formats `x` with FastIntToBuffer into the middle of an oversized buffer and
// checks the result against std::to_string. The '*' guard bytes before and
// after the usable region catch out-of-bounds writes. Also exercises the
// generic overload via the MyInt64 wrapper type.
void CheckInt64(int64_t x) {
  char buffer[absl::numbers_internal::kFastToBufferSize + 3];
  buffer[0] = '*';
  buffer[23] = '*';
  buffer[24] = '*';
  char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
  std::string expected = std::to_string(x);
  EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x;
  // Guard bytes must be untouched.
  EXPECT_EQ(buffer[0], '*');
  EXPECT_EQ(buffer[23], '*');
  EXPECT_EQ(buffer[24], '*');
  char* my_actual =
      absl::numbers_internal::FastIntToBuffer(MyInt64(x), &buffer[1]);
  EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x;
}
// Unsigned 32-bit analogue of CheckInt32: formats `x` with FastIntToBuffer
// twice and compares each result against std::to_string.
void CheckUInt32(uint32_t x) {
  char buffer[absl::numbers_internal::kFastToBufferSize];
  char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
  std::string expected = std::to_string(x);
  EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x;
  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer);
  EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x;
}
// Unsigned 64-bit analogue of CheckInt64: formats `x` starting at offset 1 of
// the buffer and compares against std::to_string, covering the plain,
// repeated, and generic (MyUInt64 wrapper) overloads.
void CheckUInt64(uint64_t x) {
  char buffer[absl::numbers_internal::kFastToBufferSize + 1];
  char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
  std::string expected = std::to_string(x);
  EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x;
  char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]);
  EXPECT_EQ(expected, std::string(&buffer[1], generic_actual))
      << " Input " << x;
  char* my_actual =
      absl::numbers_internal::FastIntToBuffer(MyUInt64(x), &buffer[1]);
  EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x;
}
// Verifies absl::Hex formatting of `v` against snprintf, for both
// zero-padded ("%016x") and space-padded ("%16x") 16-character widths.
void CheckHex64(uint64_t v) {
  char expected[16 + 1];
  std::string actual = absl::StrCat(absl::Hex(v, absl::kZeroPad16));
  snprintf(expected, sizeof(expected), "%016" PRIx64, static_cast<uint64_t>(v));
  EXPECT_EQ(expected, actual) << " Input " << v;
  actual = absl::StrCat(absl::Hex(v, absl::kSpacePad16));
  snprintf(expected, sizeof(expected), "%16" PRIx64, static_cast<uint64_t>(v));
  EXPECT_EQ(expected, actual) << " Input " << v;
}
// Exercises the FastIntToBuffer and Hex formatting helpers across small
// values, decade boundaries (digit-count transitions), and numeric limits.
TEST(Numbers, TestFastPrints) {
  // Small signed values through both 32- and 64-bit paths.
  for (int i = -100; i <= 100; i++) {
    CheckInt32(i);
    CheckInt64(i);
  }
  for (int i = 0; i <= 100; i++) {
    CheckUInt32(i);
    CheckUInt64(i);
  }
  // Extremes and values near digit-count boundaries.
  CheckInt32(INT_MIN);
  CheckInt32(INT_MAX);
  CheckInt64(LONG_MIN);
  CheckInt64(uint64_t{1000000000});
  CheckInt64(uint64_t{9999999999});
  CheckInt64(uint64_t{100000000000000});
  CheckInt64(uint64_t{999999999999999});
  CheckInt64(uint64_t{1000000000000000000});
  CheckInt64(uint64_t{1199999999999999999});
  CheckInt64(int64_t{-700000000000000000});
  CheckInt64(LONG_MAX);
  CheckUInt32(std::numeric_limits<uint32_t>::max());
  CheckUInt64(uint64_t{1000000000});
  CheckUInt64(uint64_t{9999999999});
  CheckUInt64(uint64_t{100000000000000});
  CheckUInt64(uint64_t{999999999999999});
  CheckUInt64(uint64_t{1000000000000000000});
  CheckUInt64(uint64_t{1199999999999999999});
  CheckUInt64(std::numeric_limits<uint64_t>::max());
  // Hex formatting over a dense small range plus a full-width pattern.
  for (int i = 0; i < 10000; i++) {
    CheckHex64(i);
  }
  CheckHex64(uint64_t{0x123456789abcdef0});
}
// Streams `in_value` into a string and expects SimpleAtoi to parse it back as
// `exp_value`, via both the string_view and const char* overloads. The output
// variable is pre-filled with ~exp_value so a "parser never wrote the result"
// bug cannot pass by accident.
template <typename int_type, typename in_val_type>
void VerifySimpleAtoiGood(in_val_type in_value, int_type exp_value) {
  std::string s;
  absl::strings_internal::OStringStream(&s) << in_value;
  int_type x = static_cast<int_type>(~exp_value);
  EXPECT_TRUE(SimpleAtoi(s, &x))
      << "in_value=" << in_value << " s=" << s << " x=" << x;
  EXPECT_EQ(exp_value, x);
  x = static_cast<int_type>(~exp_value);
  EXPECT_TRUE(SimpleAtoi(s.c_str(), &x));
  EXPECT_EQ(exp_value, x);
}
// Streams `in_value` into a string and expects SimpleAtoi to reject it (e.g.
// a negative value for an unsigned target, or an out-of-range magnitude),
// via both the string_view and const char* overloads.
template <typename int_type, typename in_val_type>
void VerifySimpleAtoiBad(in_val_type in_value) {
  std::string s;
  absl::strings_internal::OStringStream(&s) << in_value;
  int_type x;
  EXPECT_FALSE(SimpleAtoi(s, &x));
  EXPECT_FALSE(SimpleAtoi(s.c_str(), &x));
}
// Covers SimpleAtoi for every supported destination width/signedness,
// checking range limits of each type and rejection of values that do not fit.
TEST(NumbersTest, Atoi) {
  // int32_t destination.
  VerifySimpleAtoiGood<int32_t>(0, 0);
  VerifySimpleAtoiGood<int32_t>(42, 42);
  VerifySimpleAtoiGood<int32_t>(-42, -42);
  VerifySimpleAtoiGood<int32_t>(std::numeric_limits<int32_t>::min(),
                                std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<int32_t>(std::numeric_limits<int32_t>::max(),
                                std::numeric_limits<int32_t>::max());
  // uint32_t destination: negatives and wider values must fail.
  VerifySimpleAtoiGood<uint32_t>(0, 0);
  VerifySimpleAtoiGood<uint32_t>(42, 42);
  VerifySimpleAtoiBad<uint32_t>(-42);
  VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<uint32_t>(std::numeric_limits<int32_t>::max(),
                                 std::numeric_limits<int32_t>::max());
  VerifySimpleAtoiGood<uint32_t>(std::numeric_limits<uint32_t>::max(),
                                 std::numeric_limits<uint32_t>::max());
  VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int64_t>::min());
  VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int64_t>::max());
  VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<uint64_t>::max());
  // int64_t destination.
  VerifySimpleAtoiGood<int64_t>(0, 0);
  VerifySimpleAtoiGood<int64_t>(42, 42);
  VerifySimpleAtoiGood<int64_t>(-42, -42);
  VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int32_t>::min(),
                                std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int32_t>::max(),
                                std::numeric_limits<int32_t>::max());
  VerifySimpleAtoiGood<int64_t>(std::numeric_limits<uint32_t>::max(),
                                std::numeric_limits<uint32_t>::max());
  VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int64_t>::min(),
                                std::numeric_limits<int64_t>::min());
  VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int64_t>::max(),
                                std::numeric_limits<int64_t>::max());
  VerifySimpleAtoiBad<int64_t>(std::numeric_limits<uint64_t>::max());
  // uint64_t destination.
  VerifySimpleAtoiGood<uint64_t>(0, 0);
  VerifySimpleAtoiGood<uint64_t>(42, 42);
  VerifySimpleAtoiBad<uint64_t>(-42);
  VerifySimpleAtoiBad<uint64_t>(std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<int32_t>::max(),
                                 std::numeric_limits<int32_t>::max());
  VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<uint32_t>::max(),
                                 std::numeric_limits<uint32_t>::max());
  VerifySimpleAtoiBad<uint64_t>(std::numeric_limits<int64_t>::min());
  VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<int64_t>::max(),
                                 std::numeric_limits<int64_t>::max());
  VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<uint64_t>::max(),
                                 std::numeric_limits<uint64_t>::max());
  // absl::uint128 destination.
  VerifySimpleAtoiGood<absl::uint128>(0, 0);
  VerifySimpleAtoiGood<absl::uint128>(42, 42);
  VerifySimpleAtoiBad<absl::uint128>(-42);
  VerifySimpleAtoiBad<absl::uint128>(std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<int32_t>::max(),
                                      std::numeric_limits<int32_t>::max());
  VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<uint32_t>::max(),
                                      std::numeric_limits<uint32_t>::max());
  VerifySimpleAtoiBad<absl::uint128>(std::numeric_limits<int64_t>::min());
  VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<int64_t>::max(),
                                      std::numeric_limits<int64_t>::max());
  VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<uint64_t>::max(),
                                      std::numeric_limits<uint64_t>::max());
  VerifySimpleAtoiGood<absl::uint128>(
      std::numeric_limits<absl::uint128>::max(),
      std::numeric_limits<absl::uint128>::max());
  // absl::int128 destination.
  VerifySimpleAtoiGood<absl::int128>(0, 0);
  VerifySimpleAtoiGood<absl::int128>(42, 42);
  VerifySimpleAtoiGood<absl::int128>(-42, -42);
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::min(),
                                     std::numeric_limits<int32_t>::min());
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::max(),
                                     std::numeric_limits<int32_t>::max());
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint32_t>::max(),
                                     std::numeric_limits<uint32_t>::max());
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::min(),
                                     std::numeric_limits<int64_t>::min());
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::max(),
                                     std::numeric_limits<int64_t>::max());
  VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint64_t>::max(),
                                     std::numeric_limits<uint64_t>::max());
  VerifySimpleAtoiGood<absl::int128>(
      std::numeric_limits<absl::int128>::min(),
      std::numeric_limits<absl::int128>::min());
  VerifySimpleAtoiGood<absl::int128>(
      std::numeric_limits<absl::int128>::max(),
      std::numeric_limits<absl::int128>::max());
  VerifySimpleAtoiBad<absl::int128>(std::numeric_limits<absl::uint128>::max());
  // Other accepted integral destination types.
  VerifySimpleAtoiGood<int>(-42, -42);
  VerifySimpleAtoiGood<int32_t>(-42, -42);
  VerifySimpleAtoiGood<uint32_t>(42, 42);
  VerifySimpleAtoiGood<unsigned int>(42, 42);
  VerifySimpleAtoiGood<int64_t>(-42, -42);
  VerifySimpleAtoiGood<long>(-42, -42);
  VerifySimpleAtoiGood<uint64_t>(42, 42);
  VerifySimpleAtoiGood<size_t>(42, 42);
  VerifySimpleAtoiGood<std::string::size_type>(42, 42);
}
// Covers SimpleAtod/SimpleAtof: NaN/infinity spellings, overflow to infinity,
// underflow through the subnormal range to zero, whitespace handling, and
// rejection of malformed or locale-specific input.
TEST(NumbersTest, Atod) {
  // Fallback definitions for toolchains that predate C99/C++17 macros.
#if !defined(DBL_TRUE_MIN)
  static constexpr double DBL_TRUE_MIN =
      4.940656458412465441765687928682213723650598026143247644255856825e-324;
#endif
#if !defined(FLT_TRUE_MIN)
  static constexpr float FLT_TRUE_MIN =
      1.401298464324817070923729583289916131280261941876515771757068284e-45f;
#endif
  double d;
  float f;
  // NaN and infinity spellings are case-insensitive, with optional sign.
  EXPECT_TRUE(absl::SimpleAtod("NaN", &d));
  EXPECT_TRUE(std::isnan(d));
  EXPECT_TRUE(absl::SimpleAtod("nAN", &d));
  EXPECT_TRUE(std::isnan(d));
  EXPECT_TRUE(absl::SimpleAtod("-nan", &d));
  EXPECT_TRUE(std::isnan(d));
  EXPECT_TRUE(absl::SimpleAtod("inf", &d));
  EXPECT_TRUE(std::isinf(d) && (d > 0));
  EXPECT_TRUE(absl::SimpleAtod("+Infinity", &d));
  EXPECT_TRUE(std::isinf(d) && (d > 0));
  EXPECT_TRUE(absl::SimpleAtod("-INF", &d));
  EXPECT_TRUE(std::isinf(d) && (d < 0));
  // Largest finite values parse exactly; anything beyond overflows to inf.
  EXPECT_TRUE(absl::SimpleAtod("1.7976931348623157e+308", &d));
  EXPECT_EQ(d, 1.7976931348623157e+308);
  EXPECT_TRUE(absl::SimpleAtod("5e308", &d));
  EXPECT_TRUE(std::isinf(d) && (d > 0));
  EXPECT_TRUE(absl::SimpleAtof("3.4028234663852886e+38", &f));
  EXPECT_EQ(f, 3.4028234663852886e+38f);
  EXPECT_TRUE(absl::SimpleAtof("7e38", &f));
  EXPECT_TRUE(std::isinf(f) && (f > 0));
  EXPECT_TRUE(absl::SimpleAtod("1e308", &d));
  EXPECT_EQ(d, 1e308);
  EXPECT_FALSE(std::isinf(d));
  EXPECT_TRUE(absl::SimpleAtod("1e309", &d));
  EXPECT_TRUE(std::isinf(d));
  EXPECT_TRUE(absl::SimpleAtof("1e38", &f));
  EXPECT_EQ(f, 1e38f);
  EXPECT_FALSE(std::isinf(f));
  EXPECT_TRUE(absl::SimpleAtof("1e39", &f));
  EXPECT_TRUE(std::isinf(f));
  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e307", &d));
  EXPECT_EQ(d, 9.999999999999999999e307);
  EXPECT_FALSE(std::isinf(d));
  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e308", &d));
  EXPECT_TRUE(std::isinf(d));
  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e37", &f));
  EXPECT_EQ(f, 9.999999999999999999e37f);
  EXPECT_FALSE(std::isinf(f));
  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e38", &f));
  EXPECT_TRUE(std::isinf(f));
  // Smallest normal and subnormal values; anything smaller underflows to 0.
  EXPECT_TRUE(absl::SimpleAtod("2.2250738585072014e-308", &d));
  EXPECT_EQ(d, 2.2250738585072014e-308);
  EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-324", &d));
  EXPECT_EQ(d, 4.9406564584124654e-324);
  EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-325", &d));
  EXPECT_EQ(d, 0);
  EXPECT_TRUE(absl::SimpleAtof("1.1754943508222875e-38", &f));
  EXPECT_EQ(f, 1.1754943508222875e-38f);
  EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-45", &f));
  EXPECT_EQ(f, 1.4012984643248171e-45f);
  EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-46", &f));
  EXPECT_EQ(f, 0);
  EXPECT_TRUE(absl::SimpleAtod("1e-307", &d));
  EXPECT_EQ(d, 1e-307);
  EXPECT_GE(d, DBL_MIN);
  EXPECT_LT(d, DBL_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtod("1e-323", &d));
  EXPECT_EQ(d, 1e-323);
  EXPECT_GE(d, DBL_TRUE_MIN);
  EXPECT_LT(d, DBL_TRUE_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtod("1e-324", &d));
  EXPECT_EQ(d, 0);
  EXPECT_TRUE(absl::SimpleAtof("1e-37", &f));
  EXPECT_EQ(f, 1e-37f);
  EXPECT_GE(f, FLT_MIN);
  EXPECT_LT(f, FLT_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtof("1e-45", &f));
  EXPECT_EQ(f, 1e-45f);
  EXPECT_GE(f, FLT_TRUE_MIN);
  EXPECT_LT(f, FLT_TRUE_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtof("1e-46", &f));
  EXPECT_EQ(f, 0);
  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-308", &d));
  EXPECT_EQ(d, 9.999999999999999999e-308);
  EXPECT_GE(d, DBL_MIN);
  EXPECT_LT(d, DBL_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-324", &d));
  EXPECT_EQ(d, 9.999999999999999999e-324);
  EXPECT_GE(d, DBL_TRUE_MIN);
  EXPECT_LT(d, DBL_TRUE_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-325", &d));
  EXPECT_EQ(d, 0);
  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-38", &f));
  EXPECT_EQ(f, 9.999999999999999999e-38f);
  EXPECT_GE(f, FLT_MIN);
  EXPECT_LT(f, FLT_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-46", &f));
  EXPECT_EQ(f, 9.999999999999999999e-46f);
  EXPECT_GE(f, FLT_TRUE_MIN);
  EXPECT_LT(f, FLT_TRUE_MIN * 10);
  EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-47", &f));
  EXPECT_EQ(f, 0);
  // Surrounding whitespace is accepted; embedded junk is not.
  EXPECT_TRUE(absl::SimpleAtod(" \t\r\n 2.718", &d));
  EXPECT_EQ(d, 2.718);
  EXPECT_TRUE(absl::SimpleAtod(" 3.141 ", &d));
  EXPECT_EQ(d, 3.141);
  EXPECT_FALSE(absl::SimpleAtod("n 0", &d));
  EXPECT_FALSE(absl::SimpleAtod("0n ", &d));
  // Leading zeros, bare fractions, and explicit '+' are fine; digit
  // separators and locale-specific decimal commas are rejected.
  EXPECT_TRUE(absl::SimpleAtod("000123", &d));
  EXPECT_EQ(d, 123);
  EXPECT_TRUE(absl::SimpleAtod("000.456", &d));
  EXPECT_EQ(d, 0.456);
  EXPECT_TRUE(absl::SimpleAtod(".5", &d));
  EXPECT_EQ(d, 0.5);
  EXPECT_TRUE(absl::SimpleAtod("-.707", &d));
  EXPECT_EQ(d, -0.707);
  EXPECT_TRUE(absl::SimpleAtod("+6.0221408e+23", &d));
  EXPECT_EQ(d, 6.0221408e+23);
  EXPECT_FALSE(absl::SimpleAtod("123_456", &d));
  EXPECT_TRUE(absl::SimpleAtod("8.9", &d));
  EXPECT_FALSE(absl::SimpleAtod("8,9", &d));
  // Values exactly representable in the target type round-trip exactly.
  EXPECT_TRUE(absl::SimpleAtod("4503599627370497.5", &d));
  EXPECT_EQ(d, 4503599627370497.5);
  EXPECT_TRUE(absl::SimpleAtod("1e+23", &d));
  EXPECT_EQ(d, 1e+23);
  EXPECT_TRUE(absl::SimpleAtod("9223372036854775807", &d));
  EXPECT_EQ(d, 9223372036854775807);
  EXPECT_TRUE(absl::SimpleAtof("0.0625", &f));
  EXPECT_EQ(f, 0.0625f);
  EXPECT_TRUE(absl::SimpleAtof("20040229.0", &f));
  EXPECT_EQ(f, 20040229.0f);
  EXPECT_TRUE(absl::SimpleAtof("2147483647.0", &f));
  EXPECT_EQ(f, 2147483647.0f);
  EXPECT_TRUE(absl::SimpleAtod("122.416294033786585", &d));
  EXPECT_EQ(d, 122.416294033786585);
  EXPECT_TRUE(absl::SimpleAtof("122.416294033786585", &f));
  EXPECT_EQ(f, 122.416294033786585f);
}
// Only a single leading sign is permitted: doubled or mixed sign prefixes
// must be rejected for both double and float parsing.
TEST(NumbersTest, Prefixes) {
  double d;
  EXPECT_FALSE(absl::SimpleAtod("++1", &d));
  EXPECT_FALSE(absl::SimpleAtod("+-1", &d));
  EXPECT_FALSE(absl::SimpleAtod("-+1", &d));
  EXPECT_FALSE(absl::SimpleAtod("--1", &d));
  EXPECT_TRUE(absl::SimpleAtod("-1", &d));
  EXPECT_EQ(d, -1.);
  EXPECT_TRUE(absl::SimpleAtod("+1", &d));
  EXPECT_EQ(d, +1.);
  float f;
  EXPECT_FALSE(absl::SimpleAtof("++1", &f));
  EXPECT_FALSE(absl::SimpleAtof("+-1", &f));
  EXPECT_FALSE(absl::SimpleAtof("-+1", &f));
  EXPECT_FALSE(absl::SimpleAtof("--1", &f));
  EXPECT_TRUE(absl::SimpleAtof("-1", &f));
  EXPECT_EQ(f, -1.f);
  EXPECT_TRUE(absl::SimpleAtof("+1", &f));
  EXPECT_EQ(f, +1.f);
}
// SimpleAtoi also accepts enum destinations; covers enums whose enumerators
// span small values, the full int32 range, and the full uint32 range.
TEST(NumbersTest, Atoenum) {
  enum E01 {
    E01_zero = 0,
    E01_one = 1,
  };
  VerifySimpleAtoiGood<E01>(E01_zero, E01_zero);
  VerifySimpleAtoiGood<E01>(E01_one, E01_one);
  enum E_101 {
    E_101_minusone = -1,
    E_101_zero = 0,
    E_101_one = 1,
  };
  VerifySimpleAtoiGood<E_101>(E_101_minusone, E_101_minusone);
  VerifySimpleAtoiGood<E_101>(E_101_zero, E_101_zero);
  VerifySimpleAtoiGood<E_101>(E_101_one, E_101_one);
  enum E_bigint {
    E_bigint_zero = 0,
    E_bigint_one = 1,
    E_bigint_max31 = static_cast<int32_t>(0x7FFFFFFF),
  };
  VerifySimpleAtoiGood<E_bigint>(E_bigint_zero, E_bigint_zero);
  VerifySimpleAtoiGood<E_bigint>(E_bigint_one, E_bigint_one);
  VerifySimpleAtoiGood<E_bigint>(E_bigint_max31, E_bigint_max31);
  enum E_fullint {
    E_fullint_zero = 0,
    E_fullint_one = 1,
    E_fullint_max31 = static_cast<int32_t>(0x7FFFFFFF),
    E_fullint_min32 = INT32_MIN,
  };
  VerifySimpleAtoiGood<E_fullint>(E_fullint_zero, E_fullint_zero);
  VerifySimpleAtoiGood<E_fullint>(E_fullint_one, E_fullint_one);
  VerifySimpleAtoiGood<E_fullint>(E_fullint_max31, E_fullint_max31);
  VerifySimpleAtoiGood<E_fullint>(E_fullint_min32, E_fullint_min32);
  enum E_biguint {
    E_biguint_zero = 0,
    E_biguint_one = 1,
    E_biguint_max31 = static_cast<uint32_t>(0x7FFFFFFF),
    E_biguint_max32 = static_cast<uint32_t>(0xFFFFFFFF),
  };
  VerifySimpleAtoiGood<E_biguint>(E_biguint_zero, E_biguint_zero);
  VerifySimpleAtoiGood<E_biguint>(E_biguint_one, E_biguint_one);
  VerifySimpleAtoiGood<E_biguint>(E_biguint_max31, E_biguint_max31);
  VerifySimpleAtoiGood<E_biguint>(E_biguint_max32, E_biguint_max32);
}
// Formats `in_value` in hexadecimal (negatives rendered as '-' plus the hex
// magnitude, widened through uint128) and expects SimpleHexAtoi to parse it
// back as `exp_value` via both overloads. The output variable is pre-filled
// with ~exp_value to catch "result never written" bugs.
template <typename int_type, typename in_val_type>
void VerifySimpleHexAtoiGood(in_val_type in_value, int_type exp_value) {
  std::string s;
  absl::strings_internal::OStringStream strm(&s);
  if (in_value >= 0) {
    strm << std::hex << in_value;
  } else {
    strm << "-" << std::hex << -absl::uint128(in_value);
  }
  int_type x = static_cast<int_type>(~exp_value);
  EXPECT_TRUE(SimpleHexAtoi(s, &x))
      << "in_value=" << std::hex << in_value << " s=" << s << " x=" << x;
  EXPECT_EQ(exp_value, x);
  x = static_cast<int_type>(~exp_value);
  EXPECT_TRUE(SimpleHexAtoi(
      s.c_str(), &x));
  EXPECT_EQ(exp_value, x);
}
// Formats `in_value` in hexadecimal exactly like VerifySimpleHexAtoiGood and
// expects SimpleHexAtoi to reject it for the given destination type, via both
// the string_view and const char* overloads.
template <typename int_type, typename in_val_type>
void VerifySimpleHexAtoiBad(in_val_type in_value) {
  std::string s;
  absl::strings_internal::OStringStream strm(&s);
  if (in_value >= 0) {
    strm << std::hex << in_value;
  } else {
    strm << "-" << std::hex << -absl::uint128(in_value);
  }
  int_type x;
  EXPECT_FALSE(SimpleHexAtoi(s, &x));
  EXPECT_FALSE(SimpleHexAtoi(
      s.c_str(), &x));
}
TEST(NumbersTest, HexAtoi) {
VerifySimpleHexAtoiGood<int32_t>(0, 0);
VerifySimpleHexAtoiGood<int32_t>(0x42, 0x42);
VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42);
VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::min());
VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::max(),
std::numeric_limits<int32_t>::max());
VerifySimpleHexAtoiGood<uint32_t>(0, 0);
VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42);
VerifySimpleHexAtoiBad<uint32_t>(-0x42);
VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int32_t>::min());
VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<int32_t>::max(),
std::numeric_limits<int32_t>::max());
VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max());
VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::min());
VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::max());
VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<uint64_t>::max());
VerifySimpleHexAtoiGood<int64_t>(0, 0);
VerifySimpleHexAtoiGood<int64_t>(0x42, 0x42);
VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42);
VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::min());
VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::max(),
std::numeric_limits<int32_t>::max());
VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max());
VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::min());
VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max());
VerifySimpleHexAtoiBad<int64_t>(std::numeric_limits<uint64_t>::max());
VerifySimpleHexAtoiGood<uint64_t>(0, 0);
VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42);
VerifySimpleHexAtoiBad<uint64_t>(-0x42);
VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int32_t>::min());
VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int32_t>::max(),
std::numeric_limits<int32_t>::max());
VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max());
VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int64_t>::min());
VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max());
VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint64_t>::max(),
std::numeric_limits<uint64_t>::max());
VerifySimpleHexAtoiGood<absl::uint128>(0, 0);
VerifySimpleHexAtoiGood<absl::uint128>(0x42, 0x42);
VerifySimpleHexAtoiBad<absl::uint128>(-0x42);
VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int32_t>::min());
VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int32_t>::max(),
std::numeric_limits<int32_t>::max());
VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max());
VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int64_t>::min());
VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max());
VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint64_t>::max(),
std::numeric_limits<uint64_t>::max());
VerifySimpleHexAtoiGood<absl::uint128>(
std::numeric_limits<absl::uint128>::max(),
std::numeric_limits<absl::uint128>::max());
VerifySimpleHexAtoiGood<int>(-0x42, -0x42);
VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42);
VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42);
VerifySimpleHexAtoiGood<unsigned int>(0x42, 0x42);
VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42);
VerifySimpleHexAtoiGood<long>(-0x42, -0x42);
VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42);
VerifySimpleHexAtoiGood<size_t>(0x42, 0x42);
VerifySimpleHexAtoiGood<std::string::size_type>(0x42, 0x42);
int32_t value;
EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16));
EXPECT_EQ(0x34234324, value);
EXPECT_TRUE(safe_strto32_base("0X34234324", &value, 16));
EXPECT_EQ(0x34234324, value);
EXPECT_TRUE(safe_strto32_base(" \t\n 34234324", &value, 16));
EXPECT_EQ(0x34234324, value);
EXPECT_TRUE(safe_str | absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
uint32_t n, absl::Nonnull<char*> out_str) {
out_str = EncodeFullU32(n, out_str);
*out_str = '\0';
return out_str;
} | TEST(Numbers, TestFastPrints) {
  // Small signed values through both 32- and 64-bit paths.
  for (int i = -100; i <= 100; i++) {
    CheckInt32(i);
    CheckInt64(i);
  }
  for (int i = 0; i <= 100; i++) {
    CheckUInt32(i);
    CheckUInt64(i);
  }
  // Extremes and values near digit-count boundaries.
  CheckInt32(INT_MIN);
  CheckInt32(INT_MAX);
  CheckInt64(LONG_MIN);
  CheckInt64(uint64_t{1000000000});
  CheckInt64(uint64_t{9999999999});
  CheckInt64(uint64_t{100000000000000});
  CheckInt64(uint64_t{999999999999999});
  CheckInt64(uint64_t{1000000000000000000});
  CheckInt64(uint64_t{1199999999999999999});
  CheckInt64(int64_t{-700000000000000000});
  CheckInt64(LONG_MAX);
  CheckUInt32(std::numeric_limits<uint32_t>::max());
  CheckUInt64(uint64_t{1000000000});
  CheckUInt64(uint64_t{9999999999});
  CheckUInt64(uint64_t{100000000000000});
  CheckUInt64(uint64_t{999999999999999});
  CheckUInt64(uint64_t{1000000000000000000});
  CheckUInt64(uint64_t{1199999999999999999});
  CheckUInt64(std::numeric_limits<uint64_t>::max());
  // Hex formatting over a dense small range plus a full-width pattern.
  for (int i = 0; i < 10000; i++) {
    CheckHex64(i);
  }
  CheckHex64(uint64_t{0x123456789abcdef0});
}
#include "xla/service/gpu/reduction_splitter.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
// Rewrites a reduce over multiple non-contiguous dimensions into two chained
// reduces: an inner reduce over the single largest reduced dimension followed
// by an outer reduce over the remaining dimensions.
class ReductionSplitterVisitor : public DfsHloRewriteVisitor {
 public:
  // If `ignore_small_dims` is set, reductions whose largest reduced dimension
  // has extent <= 8 are left untouched.
  explicit ReductionSplitterVisitor(bool ignore_small_dims)
      : ignore_small_dims_(ignore_small_dims) {}

  absl::Status HandleReduce(HloInstruction *reduce) override {
    VLOG(4) << "Input: " << reduce->ToString();
    // Reductions over contiguous dimensions are handled elsewhere; splitting
    // them would only add work.
    if (IsReductionFromOrToContiguousDimensions(*reduce)) {
      VLOG(4) << "Reduction with contiguous dimensions. Return.";
      return absl::OkStatus();
    }
    // Nothing to split with fewer than two reduced dimensions.
    if (reduce->dimensions().size() < 2) {
      return absl::OkStatus();
    }
    // Variadic (tuple-shaped) reductions are not supported.
    if (!reduce->shape().IsArray()) {
      return absl::OkStatus();
    }

    HloInstruction *operand = reduce->mutable_operand(0);
    const Shape &shape = operand->shape();
    CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
        << "Default layout should be enforced on reduction operand";
    // Invariant guaranteed by the contiguity check above: no two reduced
    // dimensions are adjacent. Use std::abs so the int64_t difference is not
    // truncated through the C `int abs(int)` overload.
    for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
      for (int64_t j = i + 1; j < reduce->dimensions().size(); ++j) {
        CHECK(std::abs(reduce->dimensions(i) - reduce->dimensions(j)) > 1)
            << "Reduction dimensions must not be consecutive";
      }
    }

    // Pick the reduced dimension with the largest extent; splitting it out
    // first gives the inner reduce the most work.
    int64_t max_shape_dim = 0;
    int64_t max_reduce_dim = 0;
    const auto &input_shape = reduce->operand(0)->shape();
    for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
      if (input_shape.dimensions(reduce->dimensions(i)) > max_shape_dim) {
        max_reduce_dim = reduce->dimensions(i);
        max_shape_dim = input_shape.dimensions(max_reduce_dim);
      }
    }
    if (ignore_small_dims_ && max_shape_dim <= 8) {
      return absl::OkStatus();
    }
    VLOG(3) << "Splitting reduction " << reduce->name() << " at dimension "
            << max_reduce_dim;

    // Inner reduce: collapse only max_reduce_dim.
    std::vector<int64_t> pre_reduce_dims;
    pre_reduce_dims.push_back(max_reduce_dim);
    std::vector<int64_t> pre_reduce_shape_dims(input_shape.dimensions().begin(),
                                               input_shape.dimensions().end());
    pre_reduce_shape_dims.erase(pre_reduce_shape_dims.begin() + max_reduce_dim);
    Shape pre_reduce_shape = ShapeUtil::MakeShape(
        reduce->shape().element_type(), pre_reduce_shape_dims);
    std::unique_ptr<HloInstruction> pre_reduce = HloInstruction::CreateReduce(
        pre_reduce_shape, reduce->mutable_operand(0),
        reduce->mutable_operand(1), pre_reduce_dims, reduce->to_apply());
    pre_reduce->set_metadata(reduce->metadata());

    // Outer reduce: the remaining dimensions, renumbered down by one where
    // they followed the dimension removed by the inner reduce.
    std::vector<int64_t> final_reduce_dims(reduce->dimensions().begin(),
                                           reduce->dimensions().end());
    final_reduce_dims.erase(
        std::remove(final_reduce_dims.begin(), final_reduce_dims.end(),
                    max_reduce_dim),
        final_reduce_dims.end());
    for (int64_t i = 0; i < final_reduce_dims.size(); ++i) {
      if (final_reduce_dims[i] > max_reduce_dim) {
        final_reduce_dims[i]--;
      }
    }
    std::unique_ptr<HloInstruction> final_reduce = HloInstruction::CreateReduce(
        reduce->shape(),
        reduce->parent()->AddInstruction(std::move(pre_reduce)),
        reduce->mutable_operand(1), final_reduce_dims, reduce->to_apply());
    return ReplaceWithNewInstruction(reduce, std::move(final_reduce));
  }

 private:
  bool ignore_small_dims_;
};
// Runs the reduction-splitting rewrite over every computation of `module`
// reachable from the given execution threads.
// Returns true iff at least one reduce instruction was split, or the first
// error produced by the visitor.
absl::StatusOr<bool> ReductionSplitter::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  // Returning the visitor's StatusOr directly is equivalent to unwrapping it
  // with TF_ASSIGN_OR_RETURN and re-wrapping the bool: errors propagate, and
  // on success the "changed" flag is forwarded unchanged.
  ReductionSplitterVisitor visitor(ignore_small_dims_);
  return visitor.RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/reduction_splitter.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class ReductionSplitterTest : public HloTestBase {};
// A reduce over the non-contiguous dims {0,2} must be split: the reduced
// dimension with the largest extent (dim 2, size 512) is peeled into an inner
// pre-reduce, and the outer reduce handles the remaining dim with its index
// shifted down by one (0 stays 0 here since 0 < 2).
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionTwo) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f16[6,16,512,64]{3,2,1,0} parameter(0)
transpose.1781 = f16[6,512,16,64]{3,1,2,0} transpose(param_0), dimensions={0,2,1,3}
convert.6986 = f32[6,512,16,64]{3,1,2,0} convert(transpose.1781)
bitcast.2136 = f32[6,16,512,64]{3,2,1,0} bitcast(convert.6986)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(bitcast.2136, constant_11111), dimensions={0,2}, to_apply=add_computation
}
)")
.value();
// ignore_small_dims=true: dim 2 (512) is large enough, so the split happens.
ASSERT_TRUE(
ReductionSplitter(true).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
// The rewrite must produce a reduce-of-reduce chain.
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
// Inner reduce removes dim 2; outer reduce then removes the old dim 0.
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({2}));
EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {6, 16, 64}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
// With reduced dims {2,0,4} the largest-extent dimension is dim 0 (1024), so
// it becomes the inner reduce; the remaining reduced dims {2,4} shift down by
// one to {1,3} in the outer reduce.
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionZero) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[1024,16,512,64,128]{4,3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(param_0, constant_11111), dimensions={2,0,4}, to_apply=add_computation
}
)")
.value();
// ignore_small_dims=false: splitting applies regardless of dimension size.
ASSERT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString())_
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(pre_reduction->shape(),
ShapeUtil::MakeShape(F32, {16, 512, 64, 128}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({1, 3}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
// Both reduced dims (sizes 8 and 8) are at or below the small-dim cutoff, so
// splitting is skipped when ignore_small_dims=true but performed when false.
TEST_F(ReductionSplitterTest, DontSplitReductionWithSmallDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[16,8,1024,8]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,1024]{1,0} reduce(param_0, constant_11111), dimensions={3,1}, to_apply=add_computation
}
)")
.value();
// Same module, opposite outcomes depending on the ignore_small_dims flag.
EXPECT_FALSE(
ReductionSplitter(true).Run(module.get()).value());
EXPECT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
}
// Reduced dims {3,0} with a rank-4 input: dims 3 and 0 are not adjacent, but
// the pass's precondition rejects reductions whose sorted reduced dims are
// consecutive; this fixture exercises the "no rewrite" path (pass returns
// false, module unchanged).
TEST_F(ReductionSplitterTest, DontSplitReductionsWithContiguousDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[128,128,64,128]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[128,64]{1,0} reduce(param_0, constant_11111), dimensions={3,0}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(false).Run(module.get()).value());
}
}
}
} | absl::StatusOr<bool> ReductionSplitter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
ReductionSplitterVisitor(ignore_small_dims_)
.RunOnModule(module, execution_threads));
return changed;
} | TEST_F(ReductionSplitterTest, SplitReductionAtDimensionTwo) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f16[6,16,512,64]{3,2,1,0} parameter(0)
transpose.1781 = f16[6,512,16,64]{3,1,2,0} transpose(param_0), dimensions={0,2,1,3}
convert.6986 = f32[6,512,16,64]{3,1,2,0} convert(transpose.1781)
bitcast.2136 = f32[6,16,512,64]{3,2,1,0} bitcast(convert.6986)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(bitcast.2136, constant_11111), dimensions={0,2}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(true).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({2}));
EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {6, 16, 64}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionZero) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[1024,16,512,64,128]{4,3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(param_0, constant_11111), dimensions={2,0,4}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(pre_reduction->shape(),
ShapeUtil::MakeShape(F32, {16, 512, 64, 128}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({1, 3}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, DontSplitReductionWithSmallDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[16,8,1024,8]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,1024]{1,0} reduce(param_0, constant_11111), dimensions={3,1}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(true).Run(module.get()).value());
EXPECT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
}
TEST_F(ReductionSplitterTest, DontSplitReductionsWithContiguousDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[128,128,64,128]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[128,64]{1,0} reduce(param_0, constant_11111), dimensions={3,0}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(false).Run(module.get()).value());
} |
#include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <optional>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
namespace mlir::quant::stablehlo {
namespace {
// Copies an optional absl::string_view into an owning std::string,
// propagating std::nullopt unchanged.
std::optional<std::string> OptionalStringViewToOptionalString(
    std::optional<absl::string_view> view) {
  if (!view.has_value()) return std::nullopt;
  return std::string(*view);
}
// True iff `pass` is the stablehlo-quantize-composite-functions pass and the
// IR unit it ran on is (non-null and) a ModuleOp.
bool IsQuantizeCompositeFunctionPass(absl::Nullable<Pass*> pass,
                                     absl::Nullable<Operation*> op) {
  if (pass == nullptr) return false;
  if (pass->getArgument() != "stablehlo-quantize-composite-functions") {
    return false;
  }
  return isa_and_nonnull<ModuleOp>(op);
}
// A report is persisted only when a destination path was configured and the
// quantize-composite-functions pass just ran on a module.
bool ShouldSaveReport(absl::Nullable<Pass*> pass, absl::Nullable<Operation*> op,
                      const std::optional<std::string>& file_path) {
  if (!file_path.has_value()) return false;
  return IsQuantizeCompositeFunctionPass(pass, op);
}
// Writes `report` to `file_path`, logging success at INFO and failure (with
// the underlying status) at ERROR. Errors are not propagated to the caller.
void SaveReport(const QuantizationReport& report,
                const absl::string_view file_path) {
  const absl::Status save_status = report.Save(file_path);
  if (!save_status.ok()) {
    LOG(ERROR) << "Failed to save quantization report to: " << file_path
               << " with status: " << save_status;
    return;
  }
  LOG(INFO) << "Successfully saved quantization report to: " << file_path;
}
}
// Stores an owning copy of the report destination path; std::nullopt disables
// saving the report to disk (the report is still printed after the pass).
SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path)
: file_path_(OptionalStringViewToOptionalString(file_path)) {}
// Pass-instrumentation hook: after the quantize-composite-functions pass runs
// on a module, collect the quantization report, print it, and — when a file
// path was configured — persist it to disk.
void SaveQuantizationReportInstrumentation::runAfterPass(Pass* pass,
                                                         Operation* op) {
  // Every other pass (or a non-module IR unit) is ignored.
  if (!IsQuantizeCompositeFunctionPass(pass, op)) return;
  const QuantizationReport report(cast<ModuleOp>(op));
  report.Print();  // Always dump the report to the log.
  // Persist only when a destination path was configured.
  if (!ShouldSaveReport(pass, op, file_path_)) return;
  SaveReport(report, *file_path_);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using SaveQuantizationReportInstrumentationTest = QuantizationTestBase;
TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
}
}
} | void SaveReport(const QuantizationReport& report,
const absl::string_view file_path) {
if (const absl::Status save_status = report.Save(file_path);
save_status.ok()) {
LOG(INFO) << "Successfully saved quantization report to: " << file_path;
} else {
LOG(ERROR) << "Failed to save quantization report to: " << file_path
<< " with status: " << save_status;
}
} | TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
} |
#include "xla/parse_flags_from_env.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/logging.h"
namespace xla {
static const char kWS[] = " \t\r\n";
namespace {
// Deleter for strdup()-allocated C strings so they can live in unique_ptr.
struct FreeDeleter {
void operator()(char* ptr) { free(ptr); }
};
// Cached argv-style view of the flags parsed from one environment variable.
struct EnvArgv {
EnvArgv() : initialized(false), argc(0) {}
bool initialized;  // true once the environment variable has been parsed
int argc;  // number of non-null entries in argv
std::vector<char*> argv;  // borrowed pointers; terminated by a nullptr entry
std::vector<std::unique_ptr<char, FreeDeleter>> argv_save;  // owns argv's strings
};
}
// Appends the concatenation of (s0, s0len) and (s1, s1len) to a's argv,
// taking ownership of the heap copy via argv_save and bumping argc.
// A null s0 instead appends a nullptr sentinel entry (argc is not bumped),
// keeping argv and argv_save index-aligned.
static void AppendToEnvArgv(const char* s0, size_t s0len, const char* s1,
                            size_t s1len, EnvArgv* a) {
  if (s0 == nullptr) {
    a->argv.push_back(nullptr);
    a->argv_save.push_back(nullptr);
    return;
  }
  std::string joined;
  joined.reserve(s0len + s1len);
  joined.append(s0, s0len);
  joined.append(s1, s1len);
  char* copy = strdup(joined.c_str());  // argv entries must be mutable char*
  a->argv.push_back(copy);
  a->argv_save.emplace_back(copy);
  a->argc++;
}
// Like std::string::find_first_of, but returns s.size() instead of npos when
// no character from `x` occurs at or after `pos`.
static size_t FindFirstOf(const std::string& s, const char* x, size_t pos) {
  const size_t found = s.find_first_of(x, pos);
  if (found == std::string::npos) return s.size();
  return found;
}
// Like std::string::find_first_not_of, but returns s.size() instead of npos
// when every character at or after `pos` belongs to the set `x`.
static size_t FindFirstNotOf(const std::string& s, const char* x, size_t pos) {
  const size_t found = s.find_first_not_of(x, pos);
  if (found == std::string::npos) return s.size();
  return found;
}
// Tokenizes `flag_str` into argv entries appended to `a`. Each token must
// start with '-'; the flag name may contain [-_A-Za-z0-9]. A value of the
// form =\'...\' is taken literally, while ="..." processes backslash escapes.
// Unquoted values extend to the next whitespace. Parsing stops at the first
// token that does not begin with '-'.
static void ParseArgvFromString(const std::string& flag_str, EnvArgv* a) {
size_t b = FindFirstNotOf(flag_str, kWS, 0);
while (b != flag_str.size() && flag_str[b] == '-') {
size_t e = b;
// Scan past the flag name: '-', '_' and alphanumerics.
while (e != flag_str.size() && isascii(flag_str[e]) &&
(strchr("-_", flag_str[e]) != nullptr ||
absl::ascii_isalnum(flag_str[e]))) {
e++;
}
// Quoted value: --flag='...' or --flag="..."
if (e != flag_str.size() && flag_str[e] == '=' &&
e + 1 != flag_str.size() && strchr("'\"", flag_str[e + 1]) != nullptr) {
int c;
e++;
size_t eflag = e;  // one past '='; marks the end of the "--flag=" prefix
char quote = flag_str[e];
e++;
std::string value;
// Copy until the matching close quote; inside double quotes a backslash
// escapes the following character.
for (; e != flag_str.size() && (c = flag_str[e]) != quote; e++) {
if (quote == '"' && c == '\\' && e + 1 != flag_str.size()) {
e++;
c = flag_str[e];
}
value += c;
}
if (e != flag_str.size()) {
e++;
}
// Emit "--flag=" + unquoted value as a single argv entry.
AppendToEnvArgv(flag_str.data() + b, eflag - b, value.data(),
value.size(), a);
} else {
// Unquoted (or valueless) flag: token runs to the next whitespace.
e = FindFirstOf(flag_str, kWS, e);
AppendToEnvArgv(flag_str.data() + b, e - b, "", 0, a);
}
b = FindFirstNotOf(flag_str, kWS, e);
}
}
// Lazily populates `a` from the environment variable `envvar`. If the value
// starts with '-' it is parsed as a flag string directly; otherwise it is
// treated as the name of a file whose contents are the flag string. Dies
// (LOG(QFATAL)) if that file cannot be opened. Idempotent: a no-op once
// a->initialized is set.
static void SetArgvFromEnv(absl::string_view envvar, EnvArgv* a) {
if (!a->initialized) {
static const char kDummyArgv[] = "<argv[0]>";
// argv[0] placeholder so the result looks like a real argv array.
AppendToEnvArgv(kDummyArgv, strlen(kDummyArgv), nullptr, 0,
a);
const char* env = getenv(std::string(envvar).c_str());
if (env == nullptr || env[0] == '\0') {
// Unset or empty variable: leave only the dummy argv[0].
} else if (env[strspn(env, kWS)] == '-') {
ParseArgvFromString(env, a);
} else {
// Not a flag string: interpret the value as a file of flags.
FILE* fp = fopen(env, "r");
if (fp != nullptr) {
std::string str;
char buf[512];
int n;
while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
str.append(buf, n);
}
fclose(fp);
ParseArgvFromString(str, a);
} else {
LOG(QFATAL)
<< "Could not open file \"" << env
<< "\" to read flags for environment variable \"" << envvar
<< "\". (We assumed \"" << env
<< "\" was a file name because it did not start with a \"--\".)";
}
}
// Trailing nullptr sentinel, as in a conventional argv array.
AppendToEnvArgv(nullptr, 0, nullptr, 0, a);
a->initialized = true;
}
}
// Registry mapping an environment-variable name to its cached parse state.
// Heap-allocated and intentionally leaked to avoid destruction-order issues
// at process exit. Access must be guarded by env_argv_mu.
static absl::flat_hash_map<std::string, EnvArgv>& EnvArgvs() {
static auto* env_argvs = new absl::flat_hash_map<std::string, EnvArgv>();
return *env_argvs;
}
static absl::Mutex env_argv_mu(absl::kConstInit);
static void DieIfEnvHasUnknownFlagsLeft(absl::string_view envvar);
// Parses flags for `envvar` into `flag_list`, then aborts the process if any
// argument from the environment variable was not consumed by a known flag.
void ParseFlagsFromEnvAndDieIfUnknown(absl::string_view envvar,
const std::vector<tsl::Flag>& flag_list) {
ParseFlagsFromEnvAndIgnoreUnknown(envvar, flag_list);
DieIfEnvHasUnknownFlagsLeft(envvar);
}
// Parses flags for `envvar` into `flag_list`. Unrecognized arguments are left
// in the cached argv for a later DieIfEnvHasUnknownFlagsLeft() check; a
// recognized flag that fails to parse aborts the process (QCHECK).
void ParseFlagsFromEnvAndIgnoreUnknown(
    absl::string_view envvar, const std::vector<tsl::Flag>& flag_list) {
  absl::MutexLock lock(&env_argv_mu);
  EnvArgv& env_argv = EnvArgvs()[envvar];
  SetArgvFromEnv(envvar, &env_argv);  // no-op if already initialized
  if (VLOG_IS_ON(1)) {
    VLOG(1) << "For env var " << envvar << " found arguments:";
    for (int i = 0; i != env_argv.argc; ++i) {
      VLOG(1) << " argv[" << i << "] = " << env_argv.argv[i];
    }
  }
  // Flags::Parse removes consumed arguments from argc/argv in place.
  const bool parse_ok =
      tsl::Flags::Parse(&env_argv.argc, env_argv.argv.data(), flag_list);
  QCHECK(parse_ok) << "Flag parsing failed.\n"
                   << tsl::Flags::Usage(getenv(std::string(envvar).c_str()),
                                        flag_list);
}
// Aborts the process (LOG(QFATAL)) if the cached argv for `envvar` still
// contains arguments after parsing, i.e. flags no registered Flag consumed.
static void DieIfEnvHasUnknownFlagsLeft(absl::string_view envvar) {
absl::MutexLock lock(&env_argv_mu);
auto* env_argv = &EnvArgvs()[envvar];
SetArgvFromEnv(envvar, env_argv);
// argc == 1 means only the dummy argv[0] remains: everything was consumed.
if (env_argv->argc != 1) {
auto unknown_flags = absl::MakeSpan(env_argv->argv);
unknown_flags.remove_prefix(1);  // skip the "<argv[0]>" placeholder
LOG(QFATAL) << "Unknown flag" << (unknown_flags.size() > 1 ? "s" : "")
<< " in " << envvar << ": "
<< absl::StrJoin(unknown_flags, " ");
}
}
// Testing hook: discards any cached parse state for `envvar` and hands back
// pointers to the freshly created argc/argv so a test can inspect them after
// the next parse.
void ResetFlagsFromEnvForTesting(absl::string_view envvar, int** pargc,
                                 std::vector<char*>** pargv) {
  absl::MutexLock lock(&env_argv_mu);
  EnvArgvs().erase(envvar);
  EnvArgv& fresh = EnvArgvs()[envvar];  // default-constructed, uninitialized
  *pargc = &fresh.argc;
  *pargv = &fresh.argv;
}
} | #include "xla/parse_flags_from_env.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
// Shared assertion body: resets TF_XLA_FLAGS parse state, parses the five
// test flags, and checks every argument was consumed and each flag received
// the value encoded in kTestFlagString. `msg` labels failures with the
// calling scenario (env var vs. file).
static void TestParseFlagsFromEnv(const char* msg) {
int* pargc;
std::vector<char*>* pargv;
ResetFlagsFromEnvForTesting("TF_XLA_FLAGS", &pargc, &pargv);
bool simple = false;
std::string with_value;
std::string embedded_quotes;
std::string single_quoted;
std::string double_quoted;
std::vector<tsl::Flag> flag_list = {
tsl::Flag("simple", &simple, ""),
tsl::Flag("with_value", &with_value, ""),
tsl::Flag("embedded_quotes", &embedded_quotes, ""),
tsl::Flag("single_quoted", &single_quoted, ""),
tsl::Flag("double_quoted", &double_quoted, ""),
};
ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", flag_list);
// All flags consumed: only the dummy argv[0] and the nullptr sentinel remain.
CHECK_EQ(*pargc, 1) << msg;
const std::vector<char*>& argv_second = *pargv;
CHECK_NE(argv_second[0], nullptr) << msg;
CHECK_EQ(argv_second[1], nullptr) << msg;
CHECK(simple) << msg;
CHECK_EQ(with_value, "a_value") << msg;
CHECK_EQ(embedded_quotes, "single'double\"") << msg;
// Single quotes keep backslashes literally; double quotes process escapes.
CHECK_EQ(single_quoted, "single quoted \\\\ \n \"") << msg;
CHECK_EQ(double_quoted, "double quoted \\ \n '\"") << msg;
}
// Flag string exercising valueless flags, plain values, embedded quotes, and
// both quoting styles (single quotes are literal; double quotes honor
// backslash escapes). Consumed by TestParseFlagsFromEnv.
static const char kTestFlagString[] =
"--simple "
"--with_value=a_value "
"--embedded_quotes=single'double\" "
"--single_quoted='single quoted \\\\ \n \"' "
"--double_quoted=\"double quoted \\\\ \n '\\\"\" ";
// Flags supplied directly in the environment variable's value.
TEST(ParseFlagsFromEnv, Basic) {
tsl::setenv("TF_XLA_FLAGS", kTestFlagString, true );
TestParseFlagsFromEnv("(flags in environment variable)");
}
// Flags supplied via a file: the environment variable holds a path (does not
// start with '-'), so SetArgvFromEnv reads the flag string from that file.
TEST(ParseFlagsFromEnv, File) {
static const char* kTempVars[] = {"TEST_TMPDIR", "TMP"};
static const char kTempDir[] = "/tmp";
const char* tmp_dir = nullptr;
// Pick the first temp-dir env var that is set, falling back to /tmp.
for (int i = 0; i != TF_ARRAYSIZE(kTempVars) && tmp_dir == nullptr; i++) {
tmp_dir = getenv(kTempVars[i]);
}
if (tmp_dir == nullptr) {
tmp_dir = kTempDir;
}
std::string tmp_file =
absl::StrFormat("%s/parse_flags_from_env.%d", tmp_dir, getpid());
FILE* fp = fopen(tmp_file.c_str(), "w");
CHECK_NE(fp, nullptr) << "can't write to " << tmp_file;
for (int i = 0; kTestFlagString[i] != '\0'; i++) {
putc(kTestFlagString[i], fp);
}
fflush(fp);
CHECK_EQ(ferror(fp), 0) << "writes failed to " << tmp_file;
fclose(fp);
tsl::setenv("TF_XLA_FLAGS", tmp_file.c_str(), true );
TestParseFlagsFromEnv("(flags in file)");
unlink(tmp_file.c_str());
}
static const char* binary_name;
// Re-runs this binary (--recursing) to verify precedence: a command-line
// flag overrides the same flag from TF_XLA_FLAGS, which overrides the
// built-in default of 1.
TEST(ParseFlagsFromEnv, EnvAndFlag) {
static struct {
const char* env;
const char* arg;
const char* expected_value;
} test[] = {
{nullptr, nullptr, "1\n"},
{nullptr, "--int_flag=2", "2\n"},
{"--int_flag=3", nullptr, "3\n"},
{"--int_flag=3", "--int_flag=2", "2\n"},
};
for (int i = 0; i != TF_ARRAYSIZE(test); i++) {
if (test[i].env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", test[i].env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
if (test[i].arg != nullptr) {
argv.push_back(test[i].arg);
}
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
CHECK(child.Start()) << "test " << i;
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
CHECK_EQ(child_status, 0) << "test " << i << "\nstdout\n"
<< stdout_str << "\nstderr\n"
<< stderr_str;
// Strip CRs so the comparison also passes on Windows line endings.
stdout_str.erase(std::remove(stdout_str.begin(), stdout_str.end(), '\r'),
stdout_str.end());
CHECK_EQ(stdout_str, test[i].expected_value) << "test " << i;
}
}
// A malformed value for a known flag must make the child process abort
// (non-zero exit) via the QCHECK in ParseFlagsFromEnvAndIgnoreUnknown.
TEST(ParseFlagsFromEnv, ErrorOutOnFlagFailure) {
const char* env = "--int_flag=3parsefailure";
if (env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(child.Start());
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
// The child is expected to die during flag parsing.
EXPECT_NE(child_status, 0);
}
// An unrecognized flag in TF_XLA_FLAGS must make the child process abort
// (non-zero exit) via DieIfEnvHasUnknownFlagsLeft.
TEST(ParseFlagsFromEnv, ErrorOutOnUnknownFlag) {
const char* env = "--int_flag=3 --unknown_flag=value";
if (env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(child.Start());
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
// The child is expected to die reporting the unknown flag.
EXPECT_NE(child_status, 0);
}
}
int main(int argc, char* argv[]) {
xla::binary_name = argv[0];
bool recursing = false;
int32_t int_flag = 1;
const std::vector<tsl::Flag> flag_list = {
tsl::Flag("recursing", &recursing,
"Whether the binary is being invoked recursively."),
tsl::Flag("int_flag", &int_flag, "An integer flag to test with"),
};
std::string usage = tsl::Flags::Usage(argv[0], flag_list);
xla::ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", flag_list);
tsl::Flags::Parse(&argc, argv, flag_list);
if (recursing) {
printf("%d\n", int_flag);
exit(0);
}
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | void ParseFlagsFromEnvAndIgnoreUnknown(
absl::string_view envvar, const std::vector<tsl::Flag>& flag_list) {
absl::MutexLock lock(&env_argv_mu);
auto* env_argv = &EnvArgvs()[envvar];
SetArgvFromEnv(envvar, env_argv);
if (VLOG_IS_ON(1)) {
VLOG(1) << "For env var " << envvar << " found arguments:";
for (int i = 0; i < env_argv->argc; i++) {
VLOG(1) << " argv[" << i << "] = " << env_argv->argv[i];
}
}
QCHECK(tsl::Flags::Parse(&env_argv->argc, env_argv->argv.data(), flag_list))
<< "Flag parsing failed.\n"
<< tsl::Flags::Usage(getenv(std::string(envvar).c_str()), flag_list);
} | TEST(ParseFlagsFromEnv, Basic) {
tsl::setenv("TF_XLA_FLAGS", kTestFlagString, true );
TestParseFlagsFromEnv("(flags in environment variable)");
}
TEST(ParseFlagsFromEnv, File) {
static const char* kTempVars[] = {"TEST_TMPDIR", "TMP"};
static const char kTempDir[] = "/tmp";
const char* tmp_dir = nullptr;
for (int i = 0; i != TF_ARRAYSIZE(kTempVars) && tmp_dir == nullptr; i++) {
tmp_dir = getenv(kTempVars[i]);
}
if (tmp_dir == nullptr) {
tmp_dir = kTempDir;
}
std::string tmp_file =
absl::StrFormat("%s/parse_flags_from_env.%d", tmp_dir, getpid());
FILE* fp = fopen(tmp_file.c_str(), "w");
CHECK_NE(fp, nullptr) << "can't write to " << tmp_file;
for (int i = 0; kTestFlagString[i] != '\0'; i++) {
putc(kTestFlagString[i], fp);
}
fflush(fp);
CHECK_EQ(ferror(fp), 0) << "writes failed to " << tmp_file;
fclose(fp);
tsl::setenv("TF_XLA_FLAGS", tmp_file.c_str(), true );
TestParseFlagsFromEnv("(flags in file)");
unlink(tmp_file.c_str());
}
TEST(ParseFlagsFromEnv, EnvAndFlag) {
  // Verifies precedence: a command-line flag overrides the environment
  // variable, which overrides the compiled-in default (1).
  static struct {
    const char* env;             // value for TF_XLA_FLAGS, nullptr = unset
    const char* arg;             // extra argv entry, nullptr = none
    const char* expected_value;  // child's stdout (int_flag + newline)
  } test[] = {
      {nullptr, nullptr, "1\n"},
      {nullptr, "--int_flag=2", "2\n"},
      {"--int_flag=3", nullptr, "3\n"},
      {"--int_flag=3", "--int_flag=2", "2\n"},
  };
  for (int i = 0; i != TF_ARRAYSIZE(test); i++) {
    if (test[i].env == nullptr) {
      tsl::unsetenv("TF_XLA_FLAGS");
    } else {
      tsl::setenv("TF_XLA_FLAGS", test[i].env, true);
    }
    // Re-invoke this binary with --recursing; the child prints int_flag.
    tsl::SubProcess child;
    std::vector<std::string> argv;
    argv.push_back(binary_name);
    argv.push_back("--recursing");
    if (test[i].arg != nullptr) {
      argv.push_back(test[i].arg);
    }
    child.SetProgram(binary_name, argv);
    child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
    child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
    CHECK(child.Start()) << "test " << i;
    std::string stdout_str;
    std::string stderr_str;
    int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
    CHECK_EQ(child_status, 0) << "test " << i << "\nstdout\n"
                              << stdout_str << "\nstderr\n"
                              << stderr_str;
    // Strip CRs so the comparison also works on Windows line endings.
    stdout_str.erase(std::remove(stdout_str.begin(), stdout_str.end(), '\r'),
                     stdout_str.end());
    CHECK_EQ(stdout_str, test[i].expected_value) << "test " << i;
  }
}
TEST(ParseFlagsFromEnv, ErrorOutOnFlagFailure) {
  // A malformed integer value in TF_XLA_FLAGS must make the recursing child
  // process fail flag parsing and exit with a non-zero status.
  // (The original guarded setenv with `if (env == nullptr)` on a non-null
  // string literal; that unreachable branch has been removed.)
  tsl::setenv("TF_XLA_FLAGS", "--int_flag=3parsefailure", /*overwrite=*/true);
  tsl::SubProcess child;
  std::vector<std::string> argv;
  argv.push_back(binary_name);
  argv.push_back("--recursing");
  child.SetProgram(binary_name, argv);
  child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
  child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
  EXPECT_TRUE(child.Start());
  std::string stdout_str;
  std::string stderr_str;
  int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
  // Non-zero exit proves the child died on the bad flag value.
  EXPECT_NE(child_status, 0);
}
TEST(ParseFlagsFromEnv, ErrorOutOnUnknownFlag) {
  // An unknown flag in TF_XLA_FLAGS must make the recursing child fail.
  const char* env = "--int_flag=3 --unknown_flag=value";
  // NOTE(review): `env` is a non-null string literal, so the unsetenv branch
  // below is dead code (likely copied from a parameterized test).
  if (env == nullptr) {
    tsl::unsetenv("TF_XLA_FLAGS");
  } else {
    tsl::setenv("TF_XLA_FLAGS", env, true);
  }
  tsl::SubProcess child;
  std::vector<std::string> argv;
  argv.push_back(binary_name);
  argv.push_back("--recursing");
  child.SetProgram(binary_name, argv);
  child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
  child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
  EXPECT_TRUE(child.Start());
  std::string stdout_str;
  std::string stderr_str;
  int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
  // Non-zero exit status: ParseFlagsFromEnvAndDieIfUnknown rejected the flag.
  EXPECT_NE(child_status, 0);
} |
#include "tensorflow/core/framework/node_def_util.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/scanner.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// Attribute name and value prefix used to encode colocation constraints.
const char* const kColocationAttrName = "_class";
const char* const kColocationGroupPrefix = "loc:@";
// Op name / node-name suffix used for TPU-execute variable staging.
const char* const kTpuExecuteStagingOp = "IdentityN";
const char* const kTpuExecuteStagingNodeName = "_variable_copy";
// Default-constructed slice: no NodeDef, shared immutable empty attr map
// (intentionally leaked so it outlives all users).
AttrSlice::AttrSlice() : ndef_(nullptr) {
  static const AttrValueMap* const kEmptyAttrValueMap = new AttrValueMap;
  attrs_ = kEmptyAttrValueMap;
}
// Wraps a NodeDef; attrs are read through ndef_ on demand.
AttrSlice::AttrSlice(const NodeDef& node_def)
    : ndef_(&node_def), attrs_(nullptr) {}
// Wraps a bare attribute map with no owning NodeDef.
AttrSlice::AttrSlice(const AttrValueMap* a) : ndef_(nullptr), attrs_(a) {}
// Renders `attrs` as "name=value, ..." in sorted attribute-name order,
// optionally followed by `_device="<device>"` when `device` is non-empty.
string SummarizeAttrsHelper(AttrSlice attrs, StringPiece device) {
  std::vector<string> pieces;
  pieces.reserve(attrs.size() + 1);
  for (const auto& attr : attrs) {
    pieces.push_back(attr.first);
  }
  // Sort by name first so the summary is deterministic.
  std::sort(pieces.begin(), pieces.end());
  for (string& piece : pieces) {
    const string value_summary = SummarizeAttrValue(*attrs.Find(piece));
    strings::StrAppend(&piece, "=", value_summary);
  }
  if (!device.empty()) {
    pieces.push_back(strings::StrCat("_device=\"", device, "\""));
  }
  return absl::StrJoin(pieces, ", ");
}
// Summarizes the owning NodeDef when present; otherwise just the attrs,
// bracketed, with no device.
string AttrSlice::SummarizeNode() const {
  if (ndef_ != nullptr) {
    return SummarizeNodeDef(*ndef_);
  }
  return strings::StrCat("[", SummarizeAttrsHelper(*this, StringPiece()), "]");
}
// Renders every attribute as "name=value" joined by ", ", in the slice's
// iteration order (unsorted, unlike SummarizeAttrsHelper).
string AttrSlice::DebugString() const {
  std::vector<string> entries;
  entries.reserve(attrs()->size());
  for (const auto& [name, attr_value] : *this) {
    entries.push_back(absl::StrCat(name, "=", SummarizeAttrValue(attr_value)));
  }
  return absl::StrJoin(entries, ", ");
}
// Renders a node as "{{node name}} = Op[attrs](inputs)". At most
// `max_inputs_in_summary` inputs are listed; the remainder become "...".
string SummarizeNodeDef(const NodeDef& node_def, int max_inputs_in_summary) {
  string ret = strings::StrCat(errors::FormatNodeNameForError(node_def.name()),
                               " = ", node_def.op(), "[");
  strings::StrAppend(&ret, SummarizeAttrsHelper(node_def, node_def.device()));
  strings::StrAppend(&ret, "](");
  bool first = true;
  for (const string& input : node_def.input()) {
    if (!first) strings::StrAppend(&ret, ", ");
    first = false;
    // Post-decrement budget; a negative starting value presumably means
    // "unlimited" since 0 is never hit -- TODO confirm callers' convention.
    if (max_inputs_in_summary-- == 0) {
      strings::StrAppend(&ret, "...");
      break;
    }
    strings::StrAppend(&ret, input);
  }
  strings::StrAppend(&ret, ")");
  return ret;
}
// Attrs-plus-device summary without the name/op/input framing.
string SummarizeAttrs(const NodeDef& node_def) {
  return SummarizeAttrsHelper(node_def, node_def.device());
}
// Prefers the original node names recorded in experimental debug info (when
// present and non-empty) over the current node name.
string FormatNodeDefForError(
    StringPiece node_name, bool has_experimental_debug_info,
    const NodeDef_ExperimentalDebugInfo& experimental_debug_info) {
  return !has_experimental_debug_info ||
                 experimental_debug_info.original_node_names().empty()
             ? errors::FormatNodeNameForError(string(node_name))
             : errors::FormatOriginalNodeLocationForError(
                   experimental_debug_info.original_node_names(),
                   experimental_debug_info.original_func_names());
}
// Convenience overload pulling the debug info out of the NodeDef itself.
string FormatNodeDefForError(const NodeDef& node_def) {
  return FormatNodeDefForError(node_def.name(),
                               node_def.has_experimental_debug_info(),
                               node_def.experimental_debug_info());
}
// Returns the attr named `attr_name`, or nullptr if absent.
// Uses a linear scan rather than the map's own lookup -- presumably because
// nodes usually have few attrs and this avoids materializing a std::string
// key from the StringPiece; confirm before "optimizing" to find().
const AttrValue* AttrSlice::Find(StringPiece attr_name) const {
  for (const auto& attr : *attrs()) {
    if (attr.first == attr_name) {
      return &attr.second;
    }
  }
  return nullptr;
}
// Map-based lookup variant of Find() for callers that already hold a
// std::string key. Returns nullptr when the attr is absent.
const AttrValue* AttrSlice::FindByString(const string& attr_name) const {
  const auto iter = attrs()->find(attr_name);
  if (iter == attrs()->end()) return nullptr;
  return &iter->second;
}
// Converts a (possibly null) Find() result into a Status: OK when found,
// NotFound otherwise. The NodeDef is attached for context except for
// internal attrs (leading '_') or when no NodeDef is available.
Status AttrSlice::CheckFind(StringPiece attr_name,
                            const AttrValue* attr_value) const {
  if (attr_value == nullptr) {
    Status s = errors::NotFound("No attr named '", attr_name, "' in NodeDef:");
    if (ndef_ != nullptr && !absl::StartsWith(attr_name, "_")) {
      s = AttachDef(s, *ndef_);
    }
    return s;
  }
  return OkStatus();
}
// Status-returning lookup: *attr_value is set (possibly to nullptr) and the
// returned Status says whether the attr was found.
Status AttrSlice::Find(StringPiece attr_name,
                       const AttrValue** attr_value) const {
  *attr_value = Find(attr_name);
  return CheckFind(attr_name, *attr_value);
}
// Same, using the map-lookup path for std::string keys.
Status AttrSlice::FindByString(const string& attr_name,
                               const AttrValue** attr_value) const {
  *attr_value = FindByString(attr_name);
  return CheckFind(attr_name, *attr_value);
}
// Deep equality of two attr sets, comparing serialized protos via the
// caller-provided scratch buffers (avoids per-pair allocations).
// NOTE(review): relies on serialization being deterministic for equal
// values -- true for these value protos in practice, but worth confirming.
bool AttrSlice::EqualAttrs(AttrSlice other, Scratch* scratch) const {
  if (size() != other.size()) return false;
  for (const auto& attr : *other.attrs()) {
    auto iter = attrs()->find(attr.first);
    if (iter == attrs()->end()) return false;
    iter->second.SerializeToString(&scratch->a);
    attr.second.SerializeToString(&scratch->b);
    if (scratch->a != scratch->b) return false;
  }
  return true;
}
#define DEFINE_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, ATTR_TYPE)); \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return OkStatus(); \
} \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")")); \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return OkStatus(); \
}
#define DEFINE_TRY_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, ATTR_TYPE); \
if (!s.ok()) { \
return false; \
} \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return true; \
} \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")"); \
if (!s.ok()) { \
return false; \
} \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return true; \
}
DEFINE_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
return errors::InvalidArgument("Attr ", attr_name, " has value ", v,
" out of range for an int32");
})
DEFINE_TRY_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has value " << v
<< " out of range for an int32";
}
return false;
})
DEFINE_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_GET_ATTR(DataType, type, "type", emplace_back, static_cast<DataType>(v),
;)
DEFINE_TRY_GET_ATTR(DataType, type, "type", emplace_back,
static_cast<DataType>(v),
;)
DEFINE_GET_ATTR(TensorShapeProto, shape, "shape", emplace_back, v, ;)
DEFINE_GET_ATTR(TensorShape, shape, "shape", emplace_back, TensorShape(v),
TF_RETURN_IF_ERROR(TensorShape::IsValidShape(v));)
DEFINE_TRY_GET_ATTR(
TensorShape, shape, "shape", emplace_back, TensorShape(v),
if (!TensorShape::IsValidShape(v).ok()) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has invalid shape value "
<< v.DebugString();
}
return false;
})
DEFINE_GET_ATTR(PartialTensorShape, shape, "shape", emplace_back,
PartialTensorShape(v),
TF_RETURN_IF_ERROR(PartialTensorShape::IsValidShape(v));)
DEFINE_GET_ATTR(
Tensor, tensor, "tensor", emplace_back, t, Tensor t; if (!t.FromProto(v)) {
return errors::InvalidArgument("Attr ", attr_name, " has value ",
v.ShortDebugString(),
" that can't be converted to a Tensor");
})
DEFINE_GET_ATTR(NameAttrList, func, "func", emplace_back, v, ;);
#undef DEFINE_GET_ATTR
// True iff the node carries an attribute with the given name.
bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name) {
  const auto& attr_map = node_def.attr();
  return attr_map.find(string(attr_name)) != attr_map.end();
}
// Shared empty-string sentinel; intentionally leaked so the reference stays
// valid for the program's lifetime.
static const string& kEmptyString = *new string();
// Returns the string attr named `attr_name`, or an empty string when the
// attr is missing or not of type "string" (no error is reported).
const string& GetNodeAttrString(const AttrSlice& attrs, StringPiece attr_name) {
  const AttrValue* attr_value = attrs.Find(attr_name);
  if (attr_value == nullptr ||
      !AttrValueHasType(*attr_value, "string").ok()) {
    return kEmptyString;
  }
  return attr_value->s();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const string*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(string)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().s().size());
for (const auto& v : attr_value->list().s()) {
value->push_back(&v);
}
return true;
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const TensorShapeProto*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(shape)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().shape().size());
for (const auto& v : attr_value->list().shape()) {
value->push_back(&v);
}
return true;
}
// Reads a list(type) attr into `value`, appending to any existing contents.
// Fix: reserve capacity up front, consistent with the other list accessors
// in this file (the macro-generated overloads and the TryGetNodeAttr
// pointer collectors all reserve before appending).
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                   DataTypeVector* value) {
  const AttrValue* attr_value;
  TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
  TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(type)"));
  value->reserve(value->size() + attr_value->list().type().size());
  for (const auto& v : attr_value->list().type()) {
    value->push_back(static_cast<DataType>(v));
  }
  return OkStatus();
}
// Returns a pointer to the attr's TensorProto (aliasing the AttrValue's
// storage) or an error when the attr is missing or not a tensor.
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                   const TensorProto** value) {
  const AttrValue* attr_value;
  TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
  TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "tensor"));
  *value = &attr_value->tensor();
  return OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const TensorProto** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "tensor");
if (!s.ok()) {
return false;
}
*value = &attr_value->tensor();
return true;
}
// Returns a pointer to the attr's function value (NameAttrList), aliasing
// the AttrValue's storage, or an error when missing / wrong type.
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                   const NameAttrList** value) {
  const AttrValue* attr_value;
  TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
  TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "func"));
  *value = &attr_value->func();
  return OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const NameAttrList** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "func");
if (!s.ok()) {
return false;
}
*value = &attr_value->func();
return true;
}
// Reads a string attr and converts it to a Padding enum value.
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                   Padding* value) {
  string str_value;
  TF_RETURN_IF_ERROR(GetNodeAttr(attrs, attr_name, &str_value));
  return GetPaddingFromString(str_value, value);
}
namespace {
// Appends the DataTypes contributed by one OpDef arg to `sig`, resolving
// number_attr / type_attr / type_list_attr against the node's attrs.
// Ref args are rewritten to their ref types at the end.
template <class NodeDefOrAttrSlice>
Status AddArgToSig(const NodeDefOrAttrSlice& node_or_attrs,
                   const OpDef::ArgDef& arg_def, DataTypeVector* sig) {
  const int original_size = sig->size();
  if (!arg_def.number_attr().empty()) {
    // Repeated arg: N copies of a single dtype.
    int64_t repeats = -1;
    TF_RETURN_IF_ERROR(
        GetNodeAttr(node_or_attrs, arg_def.number_attr(), &repeats));
    // Reject values that do not round-trip through int32 (loop bound below).
    if (static_cast<int64_t>(static_cast<int32>(repeats)) != repeats) {
      return errors::InvalidArgument("Number of outputs is too big: ", repeats);
    }
    if (repeats < 0) {
      return errors::InvalidArgument("Value for number_attr() ", repeats,
                                     " < 0");
    }
    if (!arg_def.type_attr().empty()) {
      DataType dtype;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(node_or_attrs, arg_def.type_attr(), &dtype));
      for (int i = 0; i < repeats; ++i) {
        sig->push_back(dtype);
      }
    } else if (arg_def.type() != DT_INVALID) {
      for (int i = 0; i < repeats; ++i) {
        sig->push_back(arg_def.type());
      }
    } else {
      return errors::InvalidArgument("Missing type or type_attr field in ",
                                     arg_def.ShortDebugString());
    }
  } else if (!arg_def.type_attr().empty()) {
    // Single arg whose dtype comes from an attr.
    const AttrValue* attr_value;
    TF_RETURN_IF_ERROR(AttrSlice(node_or_attrs)
                           .FindByString(arg_def.type_attr(), &attr_value));
    sig->push_back(attr_value->type());
  } else if (!arg_def.type_list_attr().empty()) {
    // Variadic arg: one entry per dtype in the list attr.
    const AttrValue* attr_value;
    TF_RETURN_IF_ERROR(
        AttrSlice(node_or_attrs)
            .FindByString(arg_def.type_list_attr(), &attr_value));
    for (int dtype : attr_value->list().type()) {
      sig->push_back(static_cast<DataType>(dtype));
    }
  } else if (arg_def.type() != DT_INVALID) {
    // Fixed dtype declared directly on the arg.
    sig->push_back(arg_def.type());
  } else {
    return errors::InvalidArgument("No type fields in ",
                                   arg_def.ShortDebugString());
  }
  if (arg_def.is_ref()) {
    // Only the entries added by this call are converted to ref types;
    // double-ref is an error.
    for (size_t i = original_size; i < sig->size(); ++i) {
      if (IsRefType((*sig)[i])) {
        return errors::InvalidArgument(
            "Requested reference to a reference type: ",
            arg_def.ShortDebugString());
      }
      (*sig)[i] = MakeRefType((*sig)[i]);
    }
  }
  return OkStatus();
}
}
// Resolves the dtype of a single input port by expanding the op's input args
// one at a time and stopping as soon as the port is covered.
Status InputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
                        int input_port, DataType* input_type) {
  DataTypeVector expanded;
  for (const auto& arg : op_def.input_arg()) {
    TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &expanded));
    if (input_port < static_cast<int>(expanded.size())) {
      *input_type = expanded[input_port];
      return OkStatus();
    }
  }
  return errors::InvalidArgument("Input ", input_port, " not found for node ",
                                 node_def.name());
}
// Expands all input args of the op into the flat per-port dtype vector.
Status InputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
                         DataTypeVector* inputs) {
  for (const auto& arg : op_def.input_arg()) {
    TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, inputs));
  }
  return OkStatus();
}
// Resolves the dtype of one output port; mirrors InputTypeForNode.
Status OutputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
                         int output_port, DataType* output_type) {
  DataTypeVector output_types;
  for (const auto& arg : op_def.output_arg()) {
    TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &output_types));
    // Stop as soon as enough ports have been expanded to cover output_port.
    int output_types_size = output_types.size();
    if (output_types_size > output_port) {
      const DataType dtype = output_types[output_port];
      *output_type = dtype;
      return OkStatus();
    }
  }
  return errors::InvalidArgument("Output ", output_port, " not found for node ",
                                 node_def.name());
}
// Expands all output args into the flat per-port dtype vector.
Status OutputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
                          DataTypeVector* outputs) {
  for (const auto& arg : op_def.output_arg()) {
    TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, outputs));
  }
  return OkStatus();
}
// Overload taking just the attrs (no NodeDef needed for output expansion).
Status OutputTypesForNode(const AttrSlice& attrs, const OpDef& op_def,
                          DataTypeVector* outputs) {
  for (const auto& arg : op_def.output_arg()) {
    TF_RETURN_IF_ERROR(AddArgToSig(attrs, arg, outputs));
  }
  return OkStatus();
}
// Convenience: expand inputs and outputs in one call.
Status InOutTypesForNode(const NodeDef& node_def, const OpDef& op_def,
                         DataTypeVector* inputs, DataTypeVector* outputs) {
  TF_RETURN_IF_ERROR(InputTypesForNode(node_def, op_def, inputs));
  return OutputTypesForNode(node_def, op_def, outputs);
}
// Number of output ports = size of the expanded output signature.
Status NumOutputsForNode(const NodeDef& node_def, const OpDef& op_def,
                         int* num_outputs) {
  DataTypeVector outputs;
  TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, op_def, &outputs));
  *num_outputs = outputs.size();
  return OkStatus();
}
// Maps a flat port id to the index of the OpDef arg that produces/consumes
// it, accounting for repeated (number_attr) and variadic (type_list_attr)
// args. Returns -1 when the port id is negative or out of range.
int OpPortIdToArgId(const NodeDef& node,
                    const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
                    int port_id) {
  for (int arg_id = 0; arg_id < args.size(); ++arg_id) {
    if (port_id < 0) {
      return -1;
    } else if (port_id == 0) {
      return arg_id;
    }
    // Width of this arg in ports (1 unless repeated/variadic).
    // NOTE(review): attr().at(...) presumably dies if the named attr is
    // absent -- callers must pass a validated node; confirm.
    int n = 1;
    const auto& arg = args.Get(arg_id);
    if (!arg.number_attr().empty()) {
      n = node.attr().at(arg.number_attr()).i();
    } else if (!arg.type_list_attr().empty()) {
      n = node.attr().at(arg.type_list_attr()).list().type_size();
    }
    if (n < 0) {
      // Should never happen: a negative width indicates a malformed attr.
      DCHECK_GE(n, 0);
      return -1;
    } else if (port_id < n) {
      return arg_id;
    }
    port_id -= n;
  }
  return -1;
}
// Validates a NodeDef against its OpDef: op name match, input ordering
// (control inputs last, no ':' in them), attrs present and well-typed, and
// input count matching the expanded input signature. Unknown attrs are
// tolerated (logged) for forward compatibility.
Status ValidateNodeDef(const NodeDef& node_def, const OpDef& op_def) {
  if (node_def.op() != op_def.name()) {
    return errors::InvalidArgument(
        "NodeDef op '", node_def.op(), "' does not match ",
        SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
  }
  bool seen_control = false;
  size_t num_inputs = 0;
  // Data inputs must all precede control inputs ("^name", no port suffix).
  for (const string& input : node_def.input()) {
    if (absl::StartsWith(input, "^")) {
      seen_control = true;
      if (input.find(':') != string::npos) {
        return errors::InvalidArgument("Control input '", input,
                                       "' must not have ':' in NodeDef: ",
                                       FormatNodeDefForError(node_def));
      }
    } else if (seen_control) {
      return errors::InvalidArgument("Non-control input '", input,
                                     "' after control input in NodeDef: ",
                                     FormatNodeDefForError(node_def));
    } else {
      ++num_inputs;
    }
  }
  // Index the op's declared attrs; duplicates are an OpDef bug.
  std::unordered_map<string, const OpDef::AttrDef*> op_attrs;
  for (const auto& attr : op_def.attr()) {
    if (!gtl::InsertIfNotPresent(&op_attrs, attr.name(), &attr)) {
      return errors::InvalidArgument("OpDef has duplicate attr name '",
                                     attr.name(),
                                     "': ", SummarizeOpDef(op_def));
    }
  }
  for (const auto& attr : node_def.attr()) {
    // Internal attrs (leading '_') are not declared in OpDefs; skip them.
    if (absl::StartsWith(attr.first, "_")) {
      continue;
    }
    auto iter = op_attrs.find(attr.first);
    if (iter == op_attrs.end()) {
      // Unknown attr: log (rate-limited) but do not fail, so graphs produced
      // by newer binaries still load.
      LOG_EVERY_N_SEC(ERROR, 5)
          << "NodeDef mentions attribute " << attr.first
          << " which is not in the op definition: " << SummarizeOpDef(op_def)
          << " This may be expected if your graph generating binary is newer "
          << " than this binary. Unknown attributes will be ignored."
          << " NodeDef: " << FormatNodeDefForError(node_def);
      continue;
    }
    // Placeholder attrs (function bodies) are validated at instantiation.
    if (attr.second.placeholder().empty()) {
      TF_RETURN_WITH_CONTEXT_IF_ERROR(
          ValidateAttrValue(attr.second, *iter->second),
          "; NodeDef: ", FormatNodeDefForError(node_def), "; ",
          SummarizeOpDef(op_def));
    }
    // Mark as seen; anything left over at the end is a missing attr.
    op_attrs.erase(iter);
  }
  if (!op_attrs.empty()) {
    string attrs;
    for (const auto& attr_pair : op_attrs) {
      if (!attrs.empty()) strings::StrAppend(&attrs, "', '");
      strings::StrAppend(&attrs, attr_pair.first);
    }
    return errors::InvalidArgument(
        "NodeDef missing attr", op_attrs.size() == 1 ? " '" : "s '", attrs,
        "' from ", SummarizeOpDef(op_def),
        "; NodeDef: ", FormatNodeDefForError(node_def));
  }
  // Finally check that the data-input count matches the expanded signature.
  DataTypeVector inputs, outputs;
  TF_RETURN_IF_ERROR(InOutTypesForNode(node_def, op_def, &inputs, &outputs));
  if (num_inputs != inputs.size()) {
    return errors::InvalidArgument(
        "NodeDef expected inputs '", DataTypeVectorString(inputs),
        "' do not match ", num_inputs, " inputs specified; ",
        SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
  }
  return OkStatus();
}
namespace {
// Computes how many ports one OpDef arg occupies, resolving number_attr /
// type_list_attr against the node's attrs; plain typed args occupy one.
Status ComputeArgRange(const AttrSlice& attrs, const OpDef::ArgDef& arg_def,
                       const OpDef& op_def, int* num) {
  if (!arg_def.number_attr().empty()) {
    return GetNodeAttr(attrs, arg_def.number_attr(), num);
  } else if (!arg_def.type_list_attr().empty()) {
    const AttrValue* attr_value;
    TF_RETURN_IF_ERROR(attrs.Find(arg_def.type_list_attr(), &attr_value));
    *num = attr_value->list().type_size();
  } else if (!arg_def.type_attr().empty() || arg_def.type() != DT_INVALID) {
    *num = 1;
  } else {
    return errors::InvalidArgument(
        "Argument '", arg_def.name(),
        "' incorrectly specified in op definition: ", SummarizeOpDef(op_def));
  }
  return OkStatus();
}
// Builds name -> [start, end) port ranges by accumulating arg widths.
Status NameRangesHelper(const AttrSlice& attrs,
                        const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
                        const OpDef& op_def, NameRangeMap* result) {
  int start = 0;
  int num;
  for (const auto& arg : args) {
    TF_RETURN_IF_ERROR(ComputeArgRange(attrs, arg, op_def, &num));
    (*result)[arg.name()] = std::make_pair(start, start + num);
    start += num;
  }
  return OkStatus();
}
}
// Fills the input and/or output name->port-range maps; either map pointer
// may be null to skip that side.
Status NameRangesForNode(const AttrSlice& attrs, const OpDef& op_def,
                         NameRangeMap* inputs, NameRangeMap* outputs) {
  if (inputs != nullptr) {
    TF_RETURN_IF_ERROR(
        NameRangesHelper(attrs, op_def.input_arg(), op_def, inputs));
  }
  if (outputs == nullptr) {
    return OkStatus();
  }
  return NameRangesHelper(attrs, op_def.output_arg(), op_def, outputs);
}
// Adds any OpDef-declared default attr values the node does not already set.
void AddDefaultsToNodeDef(const OpDef& op_def, NodeDef* node_def) {
  for (const auto& attr_def : op_def.attr()) {
    // Re-wrap each iteration: AddNodeAttr mutates node_def's attr map.
    AttrSlice attrs(*node_def);
    if (attr_def.has_default_value() && !attrs.Find(attr_def.name())) {
      AddNodeAttr(attr_def.name(), attr_def.default_value(), node_def);
    }
  }
}
// Inverse of the above: removes attrs whose value equals the OpDef default.
void StripDefaultsFromNodeDef(const OpDef& op_def, NodeDef* node_def) {
  AttrSlice attrs(*node_def);
  for (const auto& attr_def : op_def.attr()) {
    if (attr_def.has_default_value()) {
      const AttrValue* attr = attrs.Find(attr_def.name());
      if (attr && AreAttrValuesEqual(*attr, attr_def.default_value()))
        node_def->mutable_attr()->erase(attr_def.name());
    }
  }
}
namespace {
using ::tensorflow::tstring;
using ::tensorflow::strings::Scanner;
// Node names are '>'-separated segments, each starting with a letter, digit,
// or '.', followed by letters/digits/-/./_//. The RANGLE ('>') separator
// presumably supports namespaced ops like "Project>Op" -- see the
// ValidSyntax test below.
bool IsValidNodeName(StringPiece sp) {
  Scanner scanner(sp);
  scanner.One(Scanner::LETTER_DIGIT_DOT)
      .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  while (true) {
    if (!scanner.GetResult())
      return false;
    if (scanner.empty())
      return true;
    scanner.One(Scanner::RANGLE)
        .One(Scanner::LETTER_DIGIT_DOT)
        .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  }
}
// Data inputs are a node name optionally followed by ":<port>"; a port is
// "0" or a digit sequence not starting with 0 handled via Many(DIGIT).
bool IsValidDataInputName(StringPiece sp) {
  Scanner scan(sp);
  scan.One(Scanner::LETTER_DIGIT_DOT)
      .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  while (true) {
    if (!scan.GetResult())
      return false;
    if (scan.empty())
      return true;
    if (scan.Peek() == ':') {
      scan.OneLiteral(":");
      if (scan.Peek() == '0') {
        scan.OneLiteral("0");
      } else {
        scan.Many(Scanner::DIGIT);
      }
    } else {
      scan.One(Scanner::RANGLE)
          .One(Scanner::LETTER_DIGIT_DOT)
          .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
    }
  }
}
// Control inputs are "^" followed by a (possibly namespaced) node name.
bool IsValidControlInputName(StringPiece sp) {
  Scanner scan(sp);
  scan.OneLiteral("^")
      .One(Scanner::LETTER_DIGIT_DOT)
      .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  while (true) {
    if (!scan.GetResult())
      return false;
    if (scan.empty())
      return true;
    scan.One(Scanner::RANGLE)
        .One(Scanner::LETTER_DIGIT_DOT)
        .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  }
}
const StringPiece kColocationGroupPrefixStringPiece(kColoca | #include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Finalizes an OpDefBuilder into a plain OpDef for the tests below.
OpDef ToOpDef(const OpDefBuilder& builder) {
  OpRegistrationData op_reg_data;
  TF_EXPECT_OK(builder.Finalize(&op_reg_data));
  return op_reg_data.op_def;
}
// Parses a textproto string into a NodeDef.
NodeDef ToNodeDef(const string& text) {
  NodeDef node_def;
  EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
  return node_def;
}
// Finalizes a NodeDefBuilder into a NodeDef.
NodeDef ToNodeDef(NodeDefBuilder&& builder) {
  NodeDef node_def;
  TF_EXPECT_OK(builder.Finalize(&node_def));
  return node_def;
}
// Asserts that ValidateNodeDef accepts `good` against `op_def`.
void ExpectSuccess(const NodeDef& good, const OpDef& op_def) {
  EXPECT_EQ(OkStatus(), ValidateNodeDef(good, op_def))
      << "NodeDef: " << SummarizeNodeDef(good)
      << "; OpDef: " << SummarizeOpDef(op_def);
}
// Asserts that ValidateNodeDef rejects `bad` with InvalidArgument whose
// message contains `message`.
void ExpectFailure(const NodeDef& bad, const OpDef& op_def,
                   const string& message) {
  Status status = ValidateNodeDef(bad, op_def);
  EXPECT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad)
                            << "; OpDef: " << SummarizeOpDef(op_def);
  if (status.ok()) return;
  EXPECT_TRUE(errors::IsInvalidArgument(status))
      << status << "; NodeDef: " << SummarizeNodeDef(bad)
      << "; OpDef: " << SummarizeOpDef(op_def);
  LOG(INFO) << "Message: " << status.message();
  EXPECT_TRUE(absl::StrContains(status.ToString(), message))
      << "NodeDef: " << SummarizeNodeDef(bad)
      << "; OpDef: " << SummarizeOpDef(op_def) << "\nActual error: " << status
      << "\nDoes not contain: " << message;
}
// Validation of a single-input op: op-name mismatch, missing attr, wrongly
// typed attr, wrong input count, control-input ordering and ':' rules.
TEST(NodeDefUtilTest, In) {
  const OpDef op = ToOpDef(OpDefBuilder("In").Input("i: T").Attr("T: type"));
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'In'
    input: 'a'
    attr {
      key: 'T'
      value { type: DT_FLOAT }
    }
  )pb");
  ExpectSuccess(node_def, op);
  EXPECT_EQ("{{node n}} = In[T=DT_FLOAT](a)", SummarizeNodeDef(node_def));
  NodeDef bad = node_def;
  bad.set_op("Wrong");
  ExpectFailure(bad, op, "NodeDef op 'Wrong' does not match Op<name=In;");
  bad = node_def;
  bad.clear_attr();
  ExpectFailure(bad, op, "NodeDef missing attr 'T' from Op<name=In;");
  bad = node_def;
  bad.clear_attr();
  AddNodeAttr("T", 17, &bad);
  ExpectFailure(
      bad, op,
      "AttrValue had value with type 'int' when 'type' expected\n\t for attr "
      "'T'\n\t; NodeDef: ");
  bad = node_def;
  bad.add_input("b");
  ExpectFailure(
      bad, op,
      "NodeDef expected inputs 'float' do not match 2 inputs specified;");
  bad = node_def;
  bad.clear_input();
  ExpectFailure(
      bad, op,
      "NodeDef expected inputs 'float' do not match 0 inputs specified;");
  // A trailing control input is fine.
  NodeDef good = node_def;
  good.add_input("^b");
  ExpectSuccess(node_def, op);
  bad = node_def;
  bad.clear_input();
  bad.add_input("^b");
  bad.add_input("a");
  ExpectFailure(bad, op,
                "Non-control input 'a' after control input "
                "in NodeDef:");
  bad = node_def;
  bad.add_input("^b:0");
  ExpectFailure(bad, op, "Control input '^b:0' must not have ':' in NodeDef:");
}
// Output-only op: attr value must satisfy the "numbertype" restriction.
TEST(NodeDefUtilTest, Out) {
  const OpDef op =
      ToOpDef(OpDefBuilder("Out").Output("o: T").Attr("T: numbertype"));
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'Out'
    attr {
      key: 'T'
      value { type: DT_INT32 }
    }
  )pb");
  ExpectSuccess(node_def, op);
  EXPECT_EQ("{{node n}} = Out[T=DT_INT32]()", SummarizeNodeDef(node_def));
  NodeDef bad = node_def;
  bad.clear_attr();
  AddNodeAttr("T", DT_STRING, &bad);
  ExpectFailure(bad, op,
                "Value for attr 'T' of string is not in the list of allowed "
                "values: float, double, int32, uint8, int16, int8, complex64, "
                "int64, qint8, quint8, qint32, bfloat16, qint16, quint16, "
                "uint16, complex128, "
                "half, uint32, uint64");
}
// String-enum attr: only the listed values are accepted.
TEST(NodeDefUtilTest, Enum) {
  const OpDef op = ToOpDef(OpDefBuilder("Enum").Attr("e: {'apple','orange'}"));
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'Enum'
    attr {
      key: 'e'
      value { s: 'apple' }
    }
  )pb");
  ExpectSuccess(node_def, op);
  EXPECT_EQ("{{node n}} = Enum[e=\"apple\"]()", SummarizeNodeDef(node_def));
  NodeDef good = node_def;
  good.clear_attr();
  AddNodeAttr("e", "orange", &good);
  ExpectSuccess(good, op);
  NodeDef bad = node_def;
  bad.clear_attr();
  AddNodeAttr("e", "foo", &bad);
  ExpectFailure(bad, op,
                "Value for attr 'e' of \"foo\" is not in the list of allowed "
                "values: \"apple\", \"orange\"");
}
// Repeated input "N * T": attr restrictions on both N (minimum) and T
// (allowed types) are enforced.
TEST(NodeDefUtilTest, SameIn) {
  const OpDef op = ToOpDef(OpDefBuilder("SameIn")
                               .Input("i: N * T")
                               .Attr("N: int >= 2")
                               .Attr("T: {float,double}"));
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'SameIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'N'
      value { i: 2 }
    }
    attr {
      key: 'T'
      value { type: DT_DOUBLE }
    }
  )pb");
  ExpectSuccess(node_def, op);
  EXPECT_EQ("{{node n}} = SameIn[N=2, T=DT_DOUBLE](a, b)",
            SummarizeNodeDef(node_def));
  NodeDef bad = ToNodeDef(R"pb(
    name: 'n'
    op: 'SameIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'N'
      value { i: 2 }
    }
    attr {
      key: 'T'
      value { type: DT_STRING }
    }
  )pb");
  ExpectFailure(bad, op,
                "Value for attr 'T' of string is not in the list of allowed "
                "values: float, double");
  bad = ToNodeDef(R"pb(
    name: 'n'
    op: 'SameIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'N'
      value { i: 1 }
    }
    attr {
      key: 'T'
      value { type: DT_FLOAT }
    }
  )pb");
  ExpectFailure(bad, op, "Value for attr 'N' of 1 must be at least minimum 2");
}
// Variadic input "i: T" with T a list(type): the list length must meet the
// declared minimum, and an empty/absent list is rejected.
TEST(NodeDefUtilTest, AnyIn) {
  const OpDef op =
      ToOpDef(OpDefBuilder("AnyIn").Input("i: T").Attr("T: list(type) >= 1"));
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectSuccess(node_def, op);
  EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a, b)",
            SummarizeNodeDef(node_def));
  const NodeDef bad = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    attr {
      key: 'T'
      value { list {} }
    }
  )pb");
  ExpectFailure(bad, op, "Length for attr 'T' of 0 must be at least minimum 1");
  const NodeDef bad2 = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    attr {
      key: 'T'
      value {}
    }
  )pb");
  ExpectFailure(bad2, op,
                "Length for attr 'T' of 0 must be at least minimum 1");
}
// The device string is summarized as a pseudo-attr `_device` after real attrs.
TEST(NodeDefUtilTest, Device) {
  const OpDef op_def1 = ToOpDef(OpDefBuilder("None"));
  const NodeDef node_def1 =
      ToNodeDef(std::move(NodeDefBuilder("d", &op_def1).Device("/cpu:17")));
  ExpectSuccess(node_def1, op_def1);
  EXPECT_EQ("{{node d}} = None[_device=\"/cpu:17\"]()",
            SummarizeNodeDef(node_def1));
  const OpDef op_def2 = ToOpDef(OpDefBuilder("WithAttr").Attr("v: int"));
  const NodeDef node_def2 = ToNodeDef(
      std::move(NodeDefBuilder("d", &op_def2).Attr("v", 7).Device("/cpu:5")));
  ExpectSuccess(node_def2, op_def2);
  EXPECT_EQ("{{node d}} = WithAttr[v=7, _device=\"/cpu:5\"]()",
            SummarizeNodeDef(node_def2));
}
// Asserts ValidateExternalNodeDefSyntax accepts `good`.
void ExpectValidSyntax(const NodeDef& good) {
  EXPECT_EQ(OkStatus(), ValidateExternalNodeDefSyntax(good))
      << "NodeDef: " << SummarizeNodeDef(good);
}
// Asserts it rejects `bad` with InvalidArgument containing `message`.
void ExpectInvalidSyntax(const NodeDef& bad, const string& message) {
  Status status = ValidateExternalNodeDefSyntax(bad);
  ASSERT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad);
  EXPECT_TRUE(errors::IsInvalidArgument(status))
      << status << "; NodeDef: " << SummarizeNodeDef(bad);
  EXPECT_TRUE(absl::StrContains(StringPiece(status.ToString()), message))
      << "NodeDef: " << SummarizeNodeDef(bad) << ", " << status << ", "
      << message;
}
// Exercises ValidateExternalNodeDefSyntax over node/input name grammar:
// namespaces ('>'), explicit output ports (':N'), control inputs ('^name'),
// and the corresponding illegal forms.
TEST(NodeDefUtilTest, ValidSyntax) {
  // Baseline: plain names, implicit port 0.
  const NodeDef node_def = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectValidSyntax(node_def);
  // Op names may contain a '>' namespace separator.
  const NodeDef node_def_namespace = ToNodeDef(R"pb(
    name: 'n'
    op: 'Project>AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectValidSyntax(node_def_namespace);
  // Explicit output ports (":0", ":123") are legal on data inputs.
  const NodeDef node_def_explicit_inputs = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a:0'
    input: 'b:123'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectValidSyntax(node_def_explicit_inputs);
  EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a:0, b:123)",
            SummarizeNodeDef(node_def_explicit_inputs));
  // Namespaced node names combine with explicit ports and control inputs.
  const NodeDef node_def_explicit_inputs_namespace = ToNodeDef(R"pb(
    name: 'Project>n'
    op: 'Project>AnyIn'
    input: 'Project>a:0'
    input: 'Project>b:123'
    input: '^Project>c'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectValidSyntax(node_def_explicit_inputs_namespace);
  EXPECT_EQ(
      "{{node Project>n}} = Project>AnyIn[T=[DT_INT32, DT_STRING]]"
      "(Project>a:0, Project>b:123, ^Project>c)",
      SummarizeNodeDef(node_def_explicit_inputs_namespace));
  // Partially-known shape attrs (dim size -1) are syntactically fine.
  const NodeDef node_def_partial_shape = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    attr {
      key: 'shp'
      value {
        shape {
          dim { size: -1 }
          dim { size: 0 }
        }
      }
    }
  )pb");
  ExpectValidSyntax(node_def_partial_shape);
  // A control input ('^b') after the data inputs is legal; so is a trailing
  // '-' in the node name.
  const NodeDef node_def_control_input = ToNodeDef(R"pb(
    name: 'n-'
    op: 'AnyIn'
    input: 'a'
    input: '^b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectValidSyntax(node_def_control_input);
  // ':' is not allowed inside a node name.
  const NodeDef node_def_invalid_name = ToNodeDef(R"pb(
    name: 'n:0'
    op: 'AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_invalid_name, "Illegal op name 'n:0'");
  // A leading underscore marks internal names; external NodeDefs reject it.
  const NodeDef node_def_internal_name = ToNodeDef(R"pb(
    name: '_n'
    op: 'AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_internal_name, "Illegal op name '_n'");
  // Backslashes are not allowed in node names.
  const NodeDef node_def_slash_in_name = ToNodeDef(R"pb(
    name: 'n\\'
    op: 'AnyIn'
    input: 'a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_slash_in_name, "Illegal op name 'n\\'");
  // The same name restrictions apply to input references.
  const NodeDef node_def_internal_input_name = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: '_a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_internal_input_name,
                      "Illegal op input name '_a'");
  const NodeDef node_def_input_name_slash = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a\\'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_input_name_slash, "Illegal op input name 'a\\'");
  // Control inputs may not carry a port suffix.
  const NodeDef node_def_invalid_control_input_name = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    input: '^b:0'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_invalid_control_input_name,
                      "Illegal op input name '^b:0'");
  const NodeDef node_def_control_input_name_slash = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a'
    input: '^b\\'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_control_input_name_slash,
                      "Illegal op input name '^b\\'");
  // Data inputs must precede all control inputs.
  const NodeDef node_def_data_input_after_control = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: '^a'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_data_input_after_control,
                      "All control inputs must follow all data inputs");
  // Port suffixes must be numeric...
  const NodeDef node_def_data_input_invalid_port = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a:b'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_data_input_invalid_port,
                      "Illegal op input name 'a:b");
  // ...and may not have leading zeros.
  const NodeDef node_def_data_input_invalid_port2 = ToNodeDef(R"pb(
    name: 'n'
    op: 'AnyIn'
    input: 'a:00'
    input: 'b'
    attr {
      key: 'T'
      value { list { type: [ DT_INT32, DT_STRING ] } }
    }
  )pb");
  ExpectInvalidSyntax(node_def_data_input_invalid_port2,
                      "Illegal op input name 'a:00");
}
// InputTypesForNode/InputTypeForNode resolve the declared input dtypes of a
// node; indexing past the last input yields a non-OK status.
TEST(InputTypesForNode, Simple) {
  const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
                                   .Input("a: float")
                                   .Input("b: int32")
                                   .Output("c: string")
                                   .Output("d: bool"));
  const NodeDef node_def = ToNodeDef(std::move(
      NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
  DataTypeVector types;
  EXPECT_TRUE(InputTypesForNode(node_def, op_def, &types).ok());
  EXPECT_EQ(types[0], DT_FLOAT);
  EXPECT_EQ(types[1], DT_INT32);
  DataType type;
  EXPECT_TRUE(InputTypeForNode(node_def, op_def, 0, &type).ok());
  EXPECT_EQ(type, DT_FLOAT);
  EXPECT_TRUE(InputTypeForNode(node_def, op_def, 1, &type).ok());
  EXPECT_EQ(type, DT_INT32);
  // Index 2 is out of range for an op with two inputs.
  EXPECT_FALSE(InputTypeForNode(node_def, op_def, 2, &type).ok());
}
// Mirror of InputTypesForNode.Simple for the output side: OutputTypesForNode
// and OutputTypeForNode resolve declared output dtypes, with range checking.
TEST(OutputTypesForNode, Simple) {
  const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
                                   .Input("a: float")
                                   .Input("b: int32")
                                   .Output("c: string")
                                   .Output("d: bool"));
  const NodeDef node_def = ToNodeDef(std::move(
      NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
  DataTypeVector types;
  EXPECT_TRUE(OutputTypesForNode(node_def, op_def, &types).ok());
  EXPECT_EQ(types[0], DT_STRING);
  EXPECT_EQ(types[1], DT_BOOL);
  DataType type;
  EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 0, &type).ok());
  EXPECT_EQ(type, DT_STRING);
  EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 1, &type).ok());
  EXPECT_EQ(type, DT_BOOL);
  // Index 2 is out of range for an op with two outputs.
  EXPECT_FALSE(OutputTypeForNode(node_def, op_def, 2, &type).ok());
}
// Regression-style check: an absurdly large "num_split" (1e12 outputs) must
// be rejected rather than, e.g., overflowing or attempting a huge allocation.
TEST(OutputTypesForNode, LargeOutput) {
  const OpDef op_def = ToOpDef(OpDefBuilder("TestSplitOp")
                                   .Input("value: int64")
                                   .Output("output: num_split * int64")
                                   .Attr("num_split: int >= 1"));
  int64_t num_split = 1000000000000;
  const NodeDef node_def =
      ToNodeDef(std::move(NodeDefBuilder("test_split_op", &op_def)
                              .Input(FakeInput())
                              .Attr("num_split", num_split)));
  DataTypeVector types;
  EXPECT_FALSE(OutputTypesForNode(node_def, op_def, &types).ok());
}
// The AttrSlice overload of OutputTypesForNode behaves like the NodeDef one:
// only the attrs are needed to resolve fixed output types.
TEST(OutputTypesForNode_AttrSliceOverload, Simple) {
  const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
                                   .Input("a: float")
                                   .Input("b: int32")
                                   .Output("c: string")
                                   .Output("d: bool"));
  const AttrSlice attr_slice =
      AttrSlice(ToNodeDef(std::move(NodeDefBuilder("simple", &op_def)
                                        .Input(FakeInput())
                                        .Input(FakeInput()))));
  DataTypeVector types;
  EXPECT_TRUE(OutputTypesForNode(attr_slice, op_def, &types).ok());
  EXPECT_EQ(types[0], DT_STRING);
  EXPECT_EQ(types[1], DT_BOOL);
}
// NameRangesForNode maps each input/output arg name to its half-open index
// range [start, end) within the node's flattened inputs/outputs.
TEST(NameRangesForNodeTest, Simple) {
  const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
                                   .Input("a: float")
                                   .Input("b: int32")
                                   .Output("c: string")
                                   .Output("d: bool"));
  NameRangeMap inputs, outputs;
  const NodeDef node_def = ToNodeDef(std::move(
      NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
  TF_EXPECT_OK(NameRangesForNode(node_def, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 2}}}), outputs);
  EXPECT_EQ("{{node simple}} = Simple[](a, b)", SummarizeNodeDef(node_def));
  // An input arg with no type info makes range computation fail.
  OpDef bad_op_def = op_def;
  bad_op_def.mutable_input_arg(0)->clear_type();
  EXPECT_FALSE(NameRangesForNode(node_def, bad_op_def, &inputs, &outputs).ok());
}
// Name ranges are independent of the concrete type chosen for a polymorphic
// attr T: the same ranges result for int32 and bool instantiations.
TEST(NameRangesForNodeTest, Polymorphic) {
  const OpDef op_def = ToOpDef(OpDefBuilder("Polymorphic")
                                   .Input("a: T")
                                   .Input("b: T")
                                   .Output("c: T")
                                   .Attr("T: type"));
  NameRangeMap inputs, outputs;
  const NodeDef node_def1 =
      ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
                              .Input(FakeInput(DT_INT32))
                              .Input(FakeInput(DT_INT32))));
  TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
  EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_INT32](a, b)",
            SummarizeNodeDef(node_def1));
  const NodeDef node_def2 =
      ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
                              .Input(FakeInput(DT_BOOL))
                              .Input(FakeInput(DT_BOOL))));
  TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
  EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_BOOL](a, b)",
            SummarizeNodeDef(node_def2));
}
// Number attrs (N, M) widen the name ranges: an arg "x: N * T" occupies N
// consecutive slots in the flattened input/output list.
TEST(NameRangesForNodeTest, NRepeats) {
  const OpDef op_def = ToOpDef(OpDefBuilder("NRepeats")
                                   .Input("a: N * int32")
                                   .Input("b: N * T")
                                   .Output("c: T")
                                   .Output("d: N * string")
                                   .Output("e: M * bool")
                                   .Attr("N: int")
                                   .Attr("M: int")
                                   .Attr("T: type"));
  NameRangeMap inputs, outputs;
  // N=4, M=3: a -> [0,4), b -> [4,8); c -> [0,1), d -> [1,5), e -> [5,8).
  const NodeDef node_def1 =
      ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
                              .Input(FakeInput(4, DT_INT32))
                              .Input(FakeInput(4, DT_FLOAT))
                              .Attr("M", 3)));
  TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 4}}, {"b", {4, 8}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 5}}, {"e", {5, 8}}}),
            outputs);
  EXPECT_EQ(
      "{{node nr}} = NRepeats[M=3, N=4, T=DT_FLOAT](a, a:1, a:2, a:3, b, b:1, "
      "b:2, b:3)",
      SummarizeNodeDef(node_def1));
  // N=2, M=7 shifts the ranges accordingly.
  const NodeDef node_def2 =
      ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
                              .Input(FakeInput(2, DT_INT32))
                              .Input(FakeInput(2, DT_DOUBLE))
                              .Attr("M", 7)));
  TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 4}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
            outputs);
  EXPECT_EQ("{{node nr}} = NRepeats[M=7, N=2, T=DT_DOUBLE](a, a:1, b, b:1)",
            SummarizeNodeDef(node_def2));
  // Without the attrs the repeat counts cannot be resolved.
  NodeDef bad_node_def = node_def2;
  bad_node_def.clear_attr();
  EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
// list(type) attrs determine range widths by their list lengths; the same op
// yields different ranges for different attr instantiations.
TEST(NameRangesForNodeTest, TypeList) {
  const OpDef op_def = ToOpDef(OpDefBuilder("TypeList")
                                   .Input("a: T1")
                                   .Input("b: T2")
                                   .Output("c: T2")
                                   .Output("d: T3")
                                   .Output("e: T1")
                                   .Attr("T1: list(type)")
                                   .Attr("T2: list(type)")
                                   .Attr("T3: list(type)"));
  NameRangeMap inputs, outputs;
  // |T1|=2, |T2|=4, |T3|=3.
  const NodeDef node_def1 =
      ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
                              .Input(FakeInput({DT_BOOL, DT_FLOAT}))
                              .Input(FakeInput(4, DT_FLOAT))
                              .Attr("T3", {DT_INT32, DT_DOUBLE, DT_STRING})));
  TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 6}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 4}}, {"d", {4, 7}}, {"e", {7, 9}}}),
            outputs);
  EXPECT_EQ(
      "{{node tl}} = TypeList[T1=[DT_BOOL, DT_FLOAT],"
      " T2=[DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT],"
      " T3=[DT_INT32, DT_DOUBLE, DT_STRING]](a, a:1, b, b:1, b:2, b:3)",
      SummarizeNodeDef(node_def1));
  // |T1|=7, |T2|=1, |T3|=2.
  const NodeDef node_def2 =
      ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
                              .Input(FakeInput(7, DT_INT32))
                              .Input(FakeInput({DT_DOUBLE}))
                              .Attr("T3", {DT_DOUBLE, DT_STRING})));
  TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
  EXPECT_EQ(NameRangeMap({{"a", {0, 7}}, {"b", {7, 8}}}), inputs);
  EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
            outputs);
  EXPECT_EQ(
      "{{node tl}} = TypeList[T1=[DT_INT32, DT_INT32, DT_INT32, DT_INT32, "
      "DT_INT32,"
      " DT_INT32, DT_INT32], T2=[DT_DOUBLE], T3=[DT_DOUBLE, DT_STRING]]"
      "(a, a:1, a:2, a:3, a:4, a:5, a:6, b)",
      SummarizeNodeDef(node_def2));
  // Missing attrs make list lengths unresolvable.
  NodeDef bad_node_def = node_def2;
  bad_node_def.clear_attr();
  EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
// AddPrefixAndSuffixToNode rewrites both the node name and, for Enter nodes,
// the "frame_name" attr with the given prefix/suffix.
TEST(AddPrefixAndSuffixToNode, Enter) {
  NodeDef node_def;
  node_def.set_name("enter");
  node_def.set_op("Enter");
  AddNodeAttr("frame_name", "test_frame", &node_def);
  const string prefix = "prefix/";
  const string suffix = "/suffix";
  TF_ASSERT_OK(AddPrefixAndSuffixToNode(prefix, suffix, &node_def));
  EXPECT_EQ("prefix/enter/suffix", node_def.name());
  string frame_name;
  TF_ASSERT_OK(GetNodeAttr(node_def, "frame_name", &frame_name));
  EXPECT_EQ("prefix/test_frame/suffix", frame_name);
}
// Only colocation constraints whose target node is in `match` get the prefix;
// others ("Node2" here) are left untouched.
TEST(MaybeAddPrefixToColocationConstraints, Basic) {
  NodeDef node_def;
  node_def.set_name("Identity");
  node_def.set_op("Identity");
  AddNodeAttr(kColocationAttrName,
              {strings::StrCat(kColocationGroupPrefix, "Node1"),
               strings::StrCat(kColocationGroupPrefix, "Node2"),
               strings::StrCat(kColocationGroupPrefix, "Node3")},
              &node_def);
  std::unordered_set<string> match;
  match.insert("Node1");
  match.insert("Node3");
  TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
  std::vector<string> coloc_constraints;
  TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
  EXPECT_EQ(
      coloc_constraints,
      std::vector<string>({"loc:@fn/Node1", "loc:@Node2", "loc:@fn/Node3"}));
}
// A node without a colocation attr must stay without one: the helper must not
// create an empty constraint list as a side effect.
TEST(MaybeAddPrefixToColocationConstraints, NoConstraints) {
  NodeDef node_def;
  node_def.set_name("Identity");
  node_def.set_op("Identity");
  std::unordered_set<string> match;
  match.insert("Node1");
  match.insert("Node3");
  TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
  EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
// Constraints whose target appears as a key in `node_map` are renamed to the
// mapped value; map entries that match nothing ("Invalid") are ignored.
TEST(MaybeUpdateColocationConstraintsWithMap, Basic) {
  NodeDef node_def;
  node_def.set_name("Identity");
  node_def.set_op("Identity");
  AddNodeAttr(kColocationAttrName,
              {strings::StrCat(kColocationGroupPrefix, "Node1"),
               strings::StrCat(kColocationGroupPrefix, "Node2"),
               strings::StrCat(kColocationGroupPrefix, "Node3")},
              &node_def);
  std::map<absl::string_view, absl::string_view> node_map;
  node_map["Node1"] = "Node4";
  node_map["Invalid"] = "Node5";
  TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
  std::vector<string> coloc_constraints;
  TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
  EXPECT_EQ(coloc_constraints,
            std::vector<string>({"loc:@Node4", "loc:@Node2", "loc:@Node3"}));
}
// As above for the map-based variant: no colocation attr in, no colocation
// attr out.
TEST(MaybeUpdateColocationConstraintsWithMap, NoConstraints) {
  NodeDef node_def;
  node_def.set_name("Identity");
  node_def.set_op("Identity");
  std::map<absl::string_view, absl::string_view> node_map;
  node_map["Node1"] = "Node4";
  node_map["Invalid"] = "Node5";
  TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
  EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
// FormatNodeForError wraps a Graph node's name in the "{{node ...}}" error
// marker.
TEST(FormatNodeForErrorTest, Node) {
  Graph g(OpRegistry::Global());
  Node* node;
  TF_CHECK_OK(NodeBuilder("enter", "NoOp").Finalize(&g, &node));
  EXPECT_EQ("{{node enter}}", FormatNodeForError(*node));
}
// Same marker format for a bare NodeDef (no debug info attached).
TEST(FormatNodeForErrorTest, NodeDef) {
  NodeDef node_def;
  node_def.set_name("enter");
  node_def.set_op("Enter");
  AddNodeAttr("frame_name", "test_frame", &node_def);
  EXPECT_EQ("{{node enter}}", FormatNodeDefForError(node_def));
}
// When experimental_debug_info carries original node/function names, the
// error marker reports those (comma-separated per pair) instead of the
// current node name.
TEST(FormatNodeForErrorTest, NodeDefWithOriginalNames) {
  NodeDef node_def;
  node_def.set_name("enter");
  node_def.set_op("Enter");
  AddNodeAttr("frame_name", "test_frame", &node_def);
  *(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
      "node_name";
  *(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
      "func_name";
  EXPECT_EQ("{{function_node func_name}}{{node node_name}}",
            FormatNodeDefForError(node_def));
  *(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
      "node_name2";
  *(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
      "func_name2";
  EXPECT_EQ(
      "{{function_node func_name}}{{node node_name}}, "
      "{{function_node func_name2}}{{node node_name2}}",
      FormatNodeDefForError(node_def));
}
// With allow_multiple_formatted_node=true, each AttachDef call appends a
// "{{node ...}}" marker even if one is already present in the message.
TEST(AttachDef, AllowMultipleFormattedNode) {
  NodeDef a;
  a.set_name("a");
  NodeDef b;
  b.set_name("b");
  Status s = Status(absl::StatusCode::kCancelled, "Error");
  Status s2 = AttachDef(s, a, true);
  EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
  Status s3 = AttachDef(s2, b, true);
  EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[{{node b}}]]", s3.message());
}
// With allow_multiple_formatted_node=false, a second attach degrades to the
// bare node name ("[[b]]") because a formatted marker already exists.
TEST(AttachDef, DisallowMultipleFormattedNode) {
  NodeDef a;
  a.set_name("a");
  NodeDef b;
  b.set_name("b");
  Status s = Status(absl::StatusCode::kCancelled, "Error");
  Status s2 = AttachDef(s, a, false);
  EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
  Status s3 = AttachDef(s2, b, false);
  EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[b]]", s3.message());
}
}
} | Status OutputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* outputs) {
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, outputs));
}
return OkStatus();
} | TEST(OutputTypesForNode_AttrSliceOverload, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const AttrSlice attr_slice =
AttrSlice(ToNodeDef(std::move(NodeDefBuilder("simple", &op_def)
.Input(FakeInput())
.Input(FakeInput()))));
DataTypeVector types;
EXPECT_TRUE(OutputTypesForNode(attr_slice, op_def, &types).ok());
EXPECT_EQ(types[0], DT_STRING);
EXPECT_EQ(types[1], DT_BOOL);
} |
#include "xla/service/while_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
// Creates a "wide" clone of `narrow_condition` that takes `wide_shape` — a
// tuple extending the narrow parameter shape with extra trailing elements.
// The wide condition extracts the narrow prefix of its parameter, calls the
// original condition on it, and inlines that call. Returns the new
// computation together with the inliner's old->new instruction map.
static absl::StatusOr<
    std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileCondition(HloComputation* narrow_condition, const Shape& wide_shape) {
  const Shape& narrow_shape =
      narrow_condition->parameter_instruction(0)->shape();
  HloComputation* wide_while_cond = [&]() {
    HloComputation::Builder builder(StrCat("wide.", narrow_condition->name()));
    builder.AddInstruction(HloInstruction::CreateParameter(
        0, wide_shape,
        absl::StrCat("wide.",
                     narrow_condition->parameter_instruction(0)->name())));
    // Placeholder root so Build() succeeds; it becomes dead once the real
    // root is installed below.
    builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
    return narrow_condition->parent()->AddEmbeddedComputation(builder.Build());
  }();
  // Re-narrow the wide parameter to the original shape's leading elements.
  HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
      wide_while_cond->parameter_instruction(0),
      narrow_shape.tuple_shapes_size(),
      absl::StrCat("renarrowed.",
                   wide_while_cond->parameter_instruction(0)->name()));
  HloInstruction* call_narrow_cond = wide_while_cond->AddInstruction(
      HloInstruction::CreateCall(ShapeUtil::MakeShape(PRED, {}),
                                 {truncated_parameter}, narrow_condition));
  wide_while_cond->set_root_instruction(call_narrow_cond);
  TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
                      CallInliner::Inline(call_narrow_cond));
  return {{wide_while_cond, std::move(inlined_instructions_map)}};
}
// Creates a "wide" clone of `narrow_body` operating on `wide_shape`. The wide
// body runs the original body on the narrow prefix of the parameter and
// passes the extra trailing tuple elements through unchanged (as GTEs of the
// wide parameter). Returns the new computation plus the inliner's old->new
// instruction map.
static absl::StatusOr<
    std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileBody(HloComputation* narrow_body, const Shape& wide_shape) {
  const Shape& narrow_shape = narrow_body->parameter_instruction(0)->shape();
  HloComputation* wide_while_body = [&]() {
    HloComputation::Builder builder(StrCat("wide.", narrow_body->name()));
    builder.AddInstruction(HloInstruction::CreateParameter(
        0, wide_shape,
        absl::StrCat("wide.", narrow_body->parameter_instruction(0)->name())));
    return narrow_body->parent()->AddEmbeddedComputation(builder.Build());
  }();
  HloInstruction* wide_parameter = wide_while_body->parameter_instruction(0);
  // Re-narrow the wide parameter so the original body sees its old shape.
  HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
      wide_parameter, narrow_shape.tuple_shapes_size(),
      absl::StrCat("renarrowed.",
                   wide_while_body->parameter_instruction(0)->name()));
  HloInstruction* call_narrow_body =
      wide_while_body->AddInstruction(HloInstruction::CreateCall(
          narrow_shape, {truncated_parameter}, narrow_body));
  // Forward each extra element i (beyond the narrow prefix) unmodified.
  std::vector<HloInstruction*> live_through_values;
  for (int i = narrow_shape.tuple_shapes_size();
       i < wide_shape.tuple_shapes_size(); i++) {
    live_through_values.push_back(wide_while_body->AddInstruction(
        HloInstruction::CreateGetTupleElement(wide_shape.tuple_shapes(i),
                                              wide_parameter, i),
        absl::StrCat(wide_while_body->name(), ".through.",
                     i - narrow_shape.tuple_shapes_size())));
  }
  wide_while_body->set_root_instruction(
      TupleUtil::AppendSuffix(call_narrow_body, live_through_values));
  TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
                      CallInliner::Inline(call_narrow_body));
  return {{wide_while_body, std::move(inlined_instructions_map)}};
}
// Rewrites `while_instr` into a new while whose loop-state tuple is extended
// with `instructions` (appended at the end and passed through every
// iteration unchanged). The old while is replaced by a tuple of GTEs over the
// old-shape prefix of the new while and removed from its computation; the
// result exposes GTEs inside the new body through which the body can read the
// live-in values.
absl::StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
    HloInstruction* while_instr,
    absl::Span<HloInstruction* const> instructions) {
  CHECK(while_instr->shape().IsTuple());
  int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
  // New loop-state shape: old tuple elements followed by one element per
  // live-in instruction.
  Shape new_while_shape = while_instr->shape();
  for (auto* instruction : instructions) {
    *new_while_shape.add_tuple_shapes() = instruction->shape();
  }
  HloComputation* new_while_condition;
  CallInliner::InlinedInstructionMap inlined_condition_instructions_map;
  TF_ASSIGN_OR_RETURN(
      std::tie(new_while_condition, inlined_condition_instructions_map),
      WidenWhileCondition(while_instr->while_condition(), new_while_shape));
  HloComputation* new_while_body;
  CallInliner::InlinedInstructionMap inlined_instructions_map;
  TF_ASSIGN_OR_RETURN(
      std::tie(new_while_body, inlined_instructions_map),
      WidenWhileBody(while_instr->while_body(), new_while_shape));
  // Initial value: old init tuple with the live-in instructions appended.
  HloInstruction* new_while_init =
      TupleUtil::AppendSuffix(while_instr->mutable_operand(0), instructions);
  HloComputation* containing_computation = while_instr->parent();
  HloInstruction* new_while = containing_computation->AddInstruction(
      HloInstruction::CreateWhile(new_while_shape, new_while_condition,
                                  new_while_body, new_while_init));
  // Users of the old while only see the old-shape prefix of the new result.
  HloInstruction* replacement_instr = TupleUtil::ExtractPrefix(
      new_while, while_instr->shape().tuple_shapes_size());
  TF_RETURN_IF_ERROR(while_instr->ReplaceAllUsesWith(replacement_instr));
  TF_RETURN_IF_ERROR(containing_computation->RemoveInstruction(while_instr));
  // Inside the new body, expose one GTE per live-in value at its appended
  // tuple index.
  HloInstruction* while_body_param = new_while_body->parameter_instruction(0);
  std::vector<HloInstruction*> live_in_instructions;
  for (int64_t i = elements_in_old_while_shape;
       i < new_while_shape.tuple_shapes_size(); i++) {
    live_in_instructions.push_back(new_while_body->AddInstruction(
        HloInstruction::CreateGetTupleElement(
            instructions[i - elements_in_old_while_shape]->shape(),
            while_body_param, i),
        absl::StrCat(new_while_body->name(), ".in.",
                     i - elements_in_old_while_shape)));
  }
  WhileUtil::MakeInstructionsLiveInResult result;
  result.new_while_instr = new_while;
  result.replacement_instr = replacement_instr;
  result.while_body_live_in_values = std::move(live_in_instructions);
  result.while_body_instruction_map = std::move(inlined_instructions_map);
  result.while_condition_instruction_map =
      std::move(inlined_condition_instructions_map);
  return std::move(result);
}
// Builds the condition computation for a counted loop: returns
// (loop_state[0] < trip_count), where element 0 of the loop state is the
// induction variable.
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopConditionComputation(const Shape& loop_state_shape,
                                    int32_t trip_count) {
  Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> cond_computation,
                      CreateComputationWithSignature(
                          {&loop_state_shape}, scalar_pred, "while_cond"));
  HloInstruction* trip_count_constant =
      cond_computation->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<int32_t>(trip_count)));
  HloInstruction* param = cond_computation->parameter_instruction(0);
  TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
                      MakeGetTupleElementHlo(param, 0));
  TF_ASSIGN_OR_RETURN(
      HloInstruction * compare,
      MakeCompareHlo(ComparisonDirection::kLt, indvar, trip_count_constant));
  cond_computation->set_root_instruction(compare);
  // std::move needed for the unique_ptr -> StatusOr implicit conversion.
  return std::move(cond_computation);
}
// Builds the body computation for a counted loop. The body increments the
// induction variable (loop_state[0]), hands the remaining tuple elements to
// `loop_body_generator`, and returns (indvar + 1, next_state...).
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopBodyComputation(
    const Shape& loop_state_shape,
    absl::FunctionRef<absl::StatusOr<WhileUtil::LoopStateTy>(
        HloInstruction*, const WhileUtil::LoopStateTy&)>
        loop_body_generator) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> body_computation,
                      CreateComputationWithSignature(
                          {&loop_state_shape}, loop_state_shape, "while_body"));
  HloInstruction* one = body_computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  HloInstruction* param = body_computation->parameter_instruction(0);
  TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
                      MakeGetTupleElementHlo(param, 0));
  TF_ASSIGN_OR_RETURN(HloInstruction * next_indvar,
                      MakeBinaryHlo(HloOpcode::kAdd, indvar, one));
  // Elements 1..N-1 of the loop state are the user-visible values.
  std::vector<HloInstruction*> loop_body_generator_args;
  for (int i = 1, e = loop_state_shape.tuple_shapes_size(); i < e; i++) {
    TF_ASSIGN_OR_RETURN(HloInstruction * tuple_element,
                        MakeGetTupleElementHlo(param, i));
    loop_body_generator_args.push_back(tuple_element);
  }
  TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> next_state,
                      loop_body_generator(indvar, loop_body_generator_args));
  // Re-prepend the incremented induction variable.
  next_state.insert(next_state.begin(), next_indvar);
  HloInstruction* next_state_tuple =
      body_computation->AddInstruction(HloInstruction::CreateTuple(next_state));
  body_computation->set_root_instruction(next_state_tuple);
  // std::move needed for the unique_ptr -> StatusOr implicit conversion.
  return std::move(body_computation);
}
// Builds the initial loop-state tuple (0, init_values...). Returns the
// freshly created zero constant and the tuple instruction; the caller owns
// both until they are added to a computation.
static std::pair<std::unique_ptr<HloInstruction>,
                 std::unique_ptr<HloInstruction>>
MakeInitTupleFromInitValues(const WhileUtil::LoopStateTy& init_values) {
  // Element 0 of the loop state is the induction variable, initialized to 0.
  std::unique_ptr<HloInstruction> indvar_init =
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0));
  std::vector<HloInstruction*> tuple_operands;
  tuple_operands.reserve(init_values.size() + 1);
  tuple_operands.push_back(indvar_init.get());
  for (auto* init_value : init_values) {
    tuple_operands.push_back(init_value);
  }
  return {std::move(indvar_init), HloInstruction::CreateTuple(tuple_operands)};
}
// Builds the loop-state tuple shape: a scalar S32 induction variable followed
// by the shapes of `init_values`, each given a default layout if it lacks one.
static Shape MakeLoopStateShapeWithLayout(
    const WhileUtil::LoopStateTy& init_values) {
  std::vector<Shape> components;
  components.reserve(init_values.size() + 1);
  components.push_back(ShapeUtil::MakeShape(S32, {}));
  for (auto* init_value : init_values) {
    Shape component = init_value->shape();
    if (!component.has_layout()) {
      LayoutUtil::SetToDefaultLayout(&component);
    }
    components.push_back(std::move(component));
  }
  return ShapeUtil::MakeTupleShape(components);
}
// Builds a counted while loop running `trip_count` iterations over
// `init_values`, adding only the condition/body computations to `module`.
// The while, its init tuple, and per-result GTEs are returned as *owned*,
// not-yet-inserted instructions so the caller can place them in a
// computation of its choosing (see the HloComputation* overload below).
absl::StatusOr<WhileUtil::OwningLoopStateTy>
WhileUtil::MakeCountedLoop(HloModule* module, int32_t trip_count,
                           const WhileUtil::LoopStateTy& init_values,
                           WhileUtil::LoopBodyGeneratorTy loop_body_generator,
                           const OpMetadata& metadata) {
  CHECK_GE(trip_count, 0);
  Shape loop_state_shape = MakeLoopStateShapeWithLayout(init_values);
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> cond,
      MakeCountedLoopConditionComputation(loop_state_shape, trip_count));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> body,
      MakeCountedLoopBodyComputation(loop_state_shape, loop_body_generator));
  std::unique_ptr<HloInstruction> owned_indvar;
  std::unique_ptr<HloInstruction> owned_init_tuple;
  std::tie(owned_indvar, owned_init_tuple) =
      MakeInitTupleFromInitValues(init_values);
  std::unique_ptr<HloInstruction> owned_while = HloInstruction::CreateWhile(
      loop_state_shape, module->AddEmbeddedComputation(std::move(cond)),
      module->AddEmbeddedComputation(std::move(body)), owned_init_tuple.get());
  owned_while->set_metadata(metadata);
  HloInstruction* while_instr = owned_while.get();
  // Ownership order matters to callers only in that all of these must be
  // added to the same computation; collect them for transfer.
  std::vector<std::unique_ptr<HloInstruction>> owned;
  owned.push_back(std::move(owned_indvar));
  owned.push_back(std::move(owned_init_tuple));
  owned.push_back(std::move(owned_while));
  // One GTE per init value; index 0 of the while result is the induction
  // variable and is not exposed.
  std::vector<HloInstruction*> while_results;
  for (int64_t i = 0, e = init_values.size(); i < e; i++) {
    std::unique_ptr<HloInstruction> user_state =
        HloInstruction::CreateGetTupleElement(init_values[i]->shape(),
                                              while_instr, i + 1);
    while_results.push_back(user_state.get());
    owned.push_back(std::move(user_state));
  }
  return WhileUtil::OwningLoopStateTy{std::move(owned), while_results};
}
// Convenience overload: builds the counted loop and immediately inserts all
// owned instructions into `computation`, returning just the per-value result
// GTEs.
absl::StatusOr<WhileUtil::LoopStateTy> WhileUtil::MakeCountedLoop(
    HloComputation* computation, int32_t trip_count,
    const WhileUtil::LoopStateTy& init_values,
    WhileUtil::LoopBodyGeneratorTy loop_body_generator,
    const OpMetadata& metadata) {
  TF_ASSIGN_OR_RETURN(
      auto owning_loop_state,
      MakeCountedLoop(computation->parent(), trip_count, init_values,
                      loop_body_generator, metadata));
  for (auto& instruction_to_add : owning_loop_state.instructions_to_add) {
    computation->AddInstruction(std::move(instruction_to_add));
  }
  return owning_loop_state.while_results;
}
// Returns the get-tuple-element instructions that make tuple element i of
// `while_body`'s root loop-invariant: operand i of the root is
// get-tuple-element(parameter(0)), i — i.e. the body forwards that loop-state
// element unchanged.
std::vector<HloInstruction*> WhileUtil::GetInvariantGTEsForWhileBody(
    const HloComputation& while_body) {
  std::vector<HloInstruction*> result;
  // Bind by reference: operands() exposes the root's operand vector, no copy
  // is needed here.
  const HloInstruction::InstructionVector& root_operands =
      while_body.root_instruction()->operands();
  // Use int64_t so the comparison with HloInstruction::tuple_index() (an
  // int64_t) is exact even for very wide tuples.
  const int64_t num_operands = root_operands.size();
  for (int64_t i = 0; i < num_operands; i++) {
    HloInstruction* instr = root_operands[i];
    if (instr->opcode() == HloOpcode::kGetTupleElement &&
        instr->tuple_index() == i &&
        instr->operand(0) == while_body.parameter_instruction(0)) {
      result.push_back(instr);
    }
  }
  return result;
}
// Buckets every get-tuple-element user of the while condition's parameter by
// the tuple index it reads, so callers can find all reads of a given
// loop-state element.
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
WhileUtil::GetGTEsMapForWhileConditional(
    const HloComputation& while_conditional) {
  absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>> result;
  HloInstruction* param = while_conditional.parameter_instruction(0);
  for (HloInstruction* user : param->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      continue;  // Only GTE reads of the parameter are of interest.
    }
    result[user->tuple_index()].push_back(user);
  }
  return result;
}
} | #include "xla/service/while_util.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
// Test fixture providing a canonical module with a trivial two-element-tuple
// while loop (body is the identity, condition is constant true).
class WhileUtilTest : public HloTestBase {
 protected:
  // Parses the fixture module and returns it, exposing the entry computation
  // and its three parameters through the out-params.
  absl::StatusOr<std::unique_ptr<VerifiedHloModule>> GetParsedModule(
      HloComputation** entry_computation, HloInstruction** param0,
      HloInstruction** param1, HloInstruction** param2) {
    const char* const hlo_string = R"(
HloModule ModuleWithWhile
while_body {
  ROOT p_body = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
}
while_condition {
  p_cond = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
  ROOT result = pred[] constant(true)
}
ENTRY entry {
  p_entry_0 = f32[32,32]{1,0} parameter(0)
  p_entry_1 = s32[32,32]{1,0} parameter(1)
  p_entry_2 = s64[32,32]{1,0} parameter(2)
  while_init = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p_entry_0, p_entry_0)
  ROOT while = (f32[32,32]{1,0}, f32[32,32]{1,0}) while(while_init), condition=while_condition, body=while_body
}
)";
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    *entry_computation = module->entry_computation();
    *param0 = (*entry_computation)->parameter_instruction(0);
    *param1 = (*entry_computation)->parameter_instruction(1);
    *param2 = (*entry_computation)->parameter_instruction(2);
    return std::move(module);
  }
};
// MakeInstructionsLiveIn with an empty instruction list still rewrites the
// while: users see a tuple of GTEs over the new while, and the new body
// reconstructs its parameter tuple element-wise.
TEST_F(WhileUtilTest, MakeZeroInstructionsLiveOp) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr, {}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  // The old while's users now read the prefix of the new while's result.
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(param_reconstructed, 0),
                        op::GetTupleElement(param_reconstructed, 1)));
}
// Making param0/param1 live in must append them to the loop's tuple state
// and expose them as trailing GTEs inside the body.
// Fix: restored `&param0, &param1, &param2` from `¶m…` mojibake.
TEST_F(WhileUtilTest, MakeTwoInstructionsLive) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {param0, param1}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  XLA_VLOG_LINES(3, module->ToString());
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto first_half_param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(first_half_param_reconstructed, 0),
                        op::GetTupleElement(first_half_param_reconstructed, 1),
                        op::GetTupleElement(op::Parameter(0), 2),
                        op::GetTupleElement(op::Parameter(0), 3)));
}
// GetInvariantGTEsForWhileBody should report only GTEs whose value is
// passed through unchanged by the body: gte.0 here, but not gte.1 (summed).
TEST_F(WhileUtilTest, GetInvariantGTEsForWhileBody) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* while_body = module->GetComputationWithName("body");
  ASSERT_NE(while_body, nullptr)
      << "Expected exactly one while_body computation";
  std::vector<HloInstruction*> gte_list =
      WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
  // Only the pass-through element (index 0) is loop invariant.
  ASSERT_EQ(gte_list.size(), 1);
  EXPECT_EQ((*gte_list.begin())->name(), "gte.0");
}
// Even when the loop has side effects (infeed in the condition), the old
// while must be removed, leaving exactly one while in the computation.
TEST_F(WhileUtilTest, AlwaysRemovePreviousWhileBody) {
  const char* const hlo_string = R"(
HloModule WhileWithSideEffects
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
token0 = token[] after-all()
infeed = (pred[], token[]) infeed(token0)
ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
to_make_live_in = f32[100] parameter(1)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* main = module->GetComputationWithName("main");
  HloInstruction* while_instr = main->root_instruction();
  HloInstruction* to_make_live_in = main->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {to_make_live_in}));
  auto is_while = [](const HloInstruction* instr) {
    return instr->opcode() == HloOpcode::kWhile;
  };
  // The rewritten module must contain exactly the new while, not the old one.
  EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
}
}
} | absl::StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
HloInstruction* while_instr,
absl::Span<HloInstruction* const> instructions) {
CHECK(while_instr->shape().IsTuple());
int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
Shape new_while_shape = while_instr->shape();
for (auto* instruction : instructions) {
*new_while_shape.add_tuple_shapes() = instruction->shape();
}
HloComputation* new_while_condition;
CallInliner::InlinedInstructionMap inlined_condition_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_condition, inlined_condition_instructions_map),
WidenWhileCondition(while_instr->while_condition(), new_while_shape));
HloComputation* new_while_body;
CallInliner::InlinedInstructionMap inlined_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_body, inlined_instructions_map),
WidenWhileBody(while_instr->while_body(), new_while_shape));
HloInstruction* new_while_init =
TupleUtil::AppendSuffix(while_instr->mutable_operand(0), instructions);
HloComputation* containing_computation = while_instr->parent();
HloInstruction* new_while = containing_computation->AddInstruction(
HloInstruction::CreateWhile(new_while_shape, new_while_condition,
new_while_body, new_while_init));
HloInstruction* replacement_instr = TupleUtil::ExtractPrefix(
new_while, while_instr->shape().tuple_shapes_size());
TF_RETURN_IF_ERROR(while_instr->ReplaceAllUsesWith(replacement_instr));
TF_RETURN_IF_ERROR(containing_computation->RemoveInstruction(while_instr));
HloInstruction* while_body_param = new_while_body->parameter_instruction(0);
std::vector<HloInstruction*> live_in_instructions;
for (int64_t i = elements_in_old_while_shape;
i < new_while_shape.tuple_shapes_size(); i++) {
live_in_instructions.push_back(new_while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(
instructions[i - elements_in_old_while_shape]->shape(),
while_body_param, i),
absl::StrCat(new_while_body->name(), ".in.",
i - elements_in_old_while_shape)));
}
WhileUtil::MakeInstructionsLiveInResult result;
result.new_while_instr = new_while;
result.replacement_instr = replacement_instr;
result.while_body_live_in_values = std::move(live_in_instructions);
result.while_body_instruction_map = std::move(inlined_instructions_map);
result.while_condition_instruction_map =
std::move(inlined_condition_instructions_map);
return std::move(result);
} | TEST_F(WhileUtilTest, MakeZeroInstructionsLiveOp) {
HloInstruction *param0, *param1, *param2;
HloComputation* entry_computation;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
GetParsedModule(&entry_computation, ¶m0, ¶m1, ¶m2));
HloInstruction* while_instr = entry_computation->root_instruction();
ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
WhileUtil::MakeInstructionsLiveIn(while_instr, {}));
HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
EXPECT_THAT(
entry_computation->root_instruction(),
op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
auto param_reconstructed =
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1));
EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(param_reconstructed, 0),
op::GetTupleElement(param_reconstructed, 1)));
}
// Making param0/param1 live in must append them to the loop's tuple state
// and expose them as trailing GTEs inside the body.
// Fix: restored `&param0, &param1, &param2` from `¶m…` mojibake.
TEST_F(WhileUtilTest, MakeTwoInstructionsLive) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {param0, param1}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  XLA_VLOG_LINES(3, module->ToString());
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto first_half_param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(first_half_param_reconstructed, 0),
                        op::GetTupleElement(first_half_param_reconstructed, 1),
                        op::GetTupleElement(op::Parameter(0), 2),
                        op::GetTupleElement(op::Parameter(0), 3)));
}
TEST_F(WhileUtilTest, AlwaysRemovePreviousWhileBody) {
const char* const hlo_string = R"(
HloModule WhileWithSideEffects
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
token0 = token[] after-all()
infeed = (pred[], token[]) infeed(token0)
ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
to_make_live_in = f32[100] parameter(1)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* main = module->GetComputationWithName("main");
HloInstruction* while_instr = main->root_instruction();
HloInstruction* to_make_live_in = main->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(
WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
WhileUtil::MakeInstructionsLiveIn(while_instr,
{to_make_live_in}));
auto is_while = [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kWhile;
};
EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
} |
#include "absl/base/internal/low_level_alloc.h"
#include <type_traits>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif
#ifdef __linux__
#include <sys/prctl.h>
#endif
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
static const int kMaxLevel = 30;
namespace {
struct AllocList {
struct Header {
uintptr_t size;
uintptr_t magic;
LowLevelAlloc::Arena *arena;
void *dummy_for_alignment;
} header;
int levels;
AllocList *next[kMaxLevel];
};
}
// Returns the number of right-shifts needed to bring `size` down to `base`
// or below, i.e. floor(log2(size / base)) for power-of-two inputs.
static int IntLog2(size_t size, size_t base) {
  int shifts = 0;
  size_t remaining = size;
  while (remaining > base) {
    remaining >>= 1;
    ++shifts;
  }
  return shifts;
}
// Advances the linear-congruential generator in *state and returns a
// geometrically distributed value >= 1 (each extra level has probability
// 1/2), derived from bit 30 of successive generator states.
static int Random(uint32_t *state) {
  uint32_t rng = *state;
  int level = 0;
  bool bit_set = false;
  do {
    rng = rng * 1103515245 + 12345;
    ++level;
    bit_set = ((rng >> 30) & 1) != 0;
  } while (!bit_set);
  *state = rng;
  return level;
}
// Picks the number of skiplist levels for a free block of `size` bytes.
// The level grows logarithmically with size plus a random geometric term
// (a deterministic "+1" when `random` is null, as used for search keys),
// clamped so the `next` pointers physically fit in the block and stay
// below kMaxLevel.
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // How many next-pointers fit in a block of `size` bytes.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
// Searches the address-ordered skiplist for the first element >= `e`.
// Fills prev[i] with the last element strictly before `e` at each level;
// returns the found element, or nullptr if the list is empty.
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    // Advance along this level while the next node is still below `e`.
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}
// Inserts `e` into the address-ordered skiplist rooted at `head`.
// `prev` is scratch space of at least kMaxLevel entries.
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  // Grow the list's level count if `e` is taller than the list so far.
  for (; head->levels < e->levels; head->levels++) {
    prev[head->levels] = head;
  }
  // Standard skiplist splice at every level `e` participates in.
  for (int i = 0; i != e->levels; i++) {
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}
// Removes `e` from the skiplist; `e` must be present (checked).
// `prev` is scratch space of at least kMaxLevel entries.
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  // Shrink the recorded level count while the top levels are empty.
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;
  }
}
// Per-arena state.  Mutable fields are guarded by `mu`; the sizing fields
// are computed once in the constructor and then immutable.
struct LowLevelAlloc::Arena {
  explicit Arena(uint32_t flags_value);
  base_internal::SpinLock mu;
  // Dummy head of the address-ordered skiplist of free blocks.
  AllocList freelist ABSL_GUARDED_BY(mu);
  // Number of live allocations; DeleteArena refuses unless this is zero.
  int32_t allocation_count ABSL_GUARDED_BY(mu);
  const uint32_t flags;   // kCallMallocHook / kAsyncSignalSafe bits.
  const size_t pagesize;  // OS page size used for mapping granularity checks.
  const size_t round_up;  // Allocation size quantum (>= header size).
  const size_t min_size;  // Smallest block worth carving off (2 * round_up).
  uint32_t random ABSL_GUARDED_BY(mu);  // LCG state for skiplist levels.
};
namespace {
// Raw storage for the process-wide arenas; constructed lazily (exactly once)
// by CreateGlobalArenas via placement new, so they never run static
// destructors.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
    [sizeof(LowLevelAlloc::Arena)];
#endif
absl::once_flag create_globals_once;
// Placement-constructs all global arenas into the storage above.
void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}
// Arena serving metadata for arenas that do not call malloc hooks.
LowLevelAlloc::Arena *UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Arena serving metadata for async-signal-safe arenas.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif
}
// Returns the lazily-constructed process-wide default arena.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}
// Magic numbers XOR-mixed with the header's own address (see Magic()) to
// detect corruption and double-free: one value for live blocks and its
// bitwise complement for free blocks.
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
// Scoped lock for an arena's spinlock.  For async-signal-safe arenas it
// also blocks all signals for the duration, so a signal handler cannot
// re-enter the allocator while the lock is held.  Callers must call
// Leave() explicitly before destruction (checked in the destructor).
class ABSL_SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      // Remember the previous mask so Leave() can restore it.
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  // Releases the lock and restores the signal mask saved on entry.
  void Leave() ABSL_UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }
 private:
  bool left_ = false;  // whether Leave() has been called
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // signal mask in effect before we blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}
// Mixes a header's own address into `magic` so each block stores a unique
// check value; XOR is self-inverse, so validation just re-applies Magic().
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  return addr ^ magic;
}
namespace {
// Returns the VM page size (allocation granularity on Windows, where the
// two can differ and the larger governs VirtualAlloc placement).
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
  return getpagesize();
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}
// Smallest power-of-two multiple of 16 that can hold an AllocList::Header;
// used as the allocation size quantum.
size_t RoundedUpBlockSize() {
  size_t round_up = 16;
  while (round_up < sizeof(AllocList::Header)) {
    round_up += round_up;
  }
  return round_up;
}
}
// Initializes an arena: computes sizing parameters and sets up an empty
// freelist whose dummy head carries the "unallocated" magic so list
// validation treats it uniformly with real free blocks.
LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      round_up(RoundedUpBlockSize()),
      min_size(2 * round_up),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}
// Creates a new arena.  Its own metadata is allocated from a global arena
// chosen to match the requested flags, so e.g. an async-signal-safe arena's
// metadata never comes from a hook-calling arena.
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
  return result;
}
// Destroys `arena` and returns all of its pages to the OS.  Returns false
// (leaving the arena intact) if it still has outstanding allocations.
// Must not be called on the default or internal metadata arenas.
// Fixes: restored `&region` from `®ion` mojibake, and corrected the
// "VitualFree" typo in the Windows failure message to "VirtualFree".
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  // With no allocations, the freelist holds exactly the page-aligned
  // regions obtained from the OS; unmap each one after sanity checks.
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      // Async-signal-safe arenas bypass any interposed libc munmap via a
      // direct syscall.
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}
// Returns a + b, crashing the process if the unsigned addition wrapped.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  const uintptr_t total = a + b;
  ABSL_RAW_CHECK(total >= a, "LowLevelAlloc arithmetic overflow");
  return total;
}
// Rounds `addr` up to a multiple of `align`, which must be a power of two.
// Overflow in the addition is trapped by CheckedAdd.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}
// Returns prev->next[i] after validating freelist invariants: the next
// block must carry the "free" magic, belong to `arena`, and — except when
// stepping off the dummy head — sit at a strictly higher, non-overlapping
// address.
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}
// If `a` and its successor are physically adjacent in memory, merges them
// into one free block and re-inserts the merged block at a freshly chosen
// skiplist level.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    // Scrub the absorbed block's header so stale use is detectable.
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels =
        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}
// Returns the block whose user pointer is `v` to `arena`'s freelist,
// marking it unallocated and coalescing with adjacent free neighbors.
// Precondition: arena->mu is held.
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  // Recover the header that sits immediately before the user area.
  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                               sizeof(f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels =
      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  // Merge forward (f with its successor) and backward (predecessor with f).
  Coalesce(f);
  Coalesce(prev[0]);
}
// Frees a block previously returned by Alloc/AllocWithArena; nullptr is a
// no-op.  The owning arena is recovered from the block's header.
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                                 sizeof(f->header));
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}
// Core allocator: returns `request` bytes from `arena`, or nullptr when
// request == 0.  Finds the first free block large enough via the skiplist;
// if none exists, maps new pages from the OS and retries.  The unused tail
// of the chosen block is split off and re-freed when large enough.
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;
    ArenaLock section(arena);
    // Total block size: user bytes + header, rounded to the arena quantum.
    size_t req_rnd =
        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
    for (;;) {
      // Search the level on which a block of this size is guaranteed to
      // appear if present.
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {
        AllocList *before = &arena->freelist;
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {
          break;  // found a block big enough
        }
      }
      // No fit: grow the arena.  Drop the lock around the system call.
      arena->mu.Unlock();
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(nullptr, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        // Direct syscall avoids any interposed, non-signal-safe mmap.
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }
#ifdef __linux__
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Best-effort: name the mapping "absl" in /proc/self/maps.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
            "absl");
#endif
#endif
#endif
      arena->mu.Lock();
      // Hand the fresh region to the freelist as one big "allocated" block,
      // then loop to find it through the normal search path.
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);
    // Split off the unused tail if it is at least min_size.
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      AllocList *n =
          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;  // user area starts right after the header
  }
  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}
// Allocates `request` bytes from the default arena; nullptr if request==0.
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}
// Allocates `request` bytes from the given arena; nullptr if request==0.
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/internal/low_level_alloc.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <thread>
#include <unordered_map>
#include <utility>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#include "absl/container/node_hash_map.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#define TEST_ASSERT(x) \
if (!(x)) { \
printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
abort(); \
}
// One allocated block under test: its pointer, length, and the seed of the
// byte pattern written into it.
struct BlockDesc {
  char *ptr;
  int len;
  int fill;
};
// Verifies the block still holds its (fill + i) & 0xff byte pattern,
// i.e. no other allocation scribbled over it.
static void CheckBlockDesc(const BlockDesc &d) {
  for (int i = 0; i != d.len; i++) {
    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
  }
}
// Fills the block with a fresh random byte pattern (fill + i) & 0xff.
static void RandomizeBlockDesc(BlockDesc *d) {
  d->fill = rand() & 0xff;
  for (int i = 0; i != d->len; i++) {
    d->ptr[i] = (d->fill + i) & 0xff;
  }
}
// NOTE(review): toggled around every allocator call; no reader is visible
// in this file — presumably observed by a malloc hook. Confirm before use.
static bool using_low_level_alloc = false;
// Stress test: performs `n` random alloc/free operations, writing a byte
// pattern into each block and checking it before every free to detect
// overlap or corruption.  `use_new_arena` exercises a private arena;
// `call_malloc_hook` sets the corresponding arena flag.
static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
  typedef absl::node_hash_map<int, BlockDesc> AllocMap;
  AllocMap allocated;
  AllocMap::iterator it;
  BlockDesc block_desc;
  int rnd;
  LowLevelAlloc::Arena *arena = nullptr;
  if (use_new_arena) {
    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
    arena = LowLevelAlloc::NewArena(flags);
  }
  for (int i = 0; i != n; i++) {
    if (i != 0 && i % 10000 == 0) {
      printf(".");
      fflush(stdout);
    }
    switch (rand() & 1) {  // coin flip: allocate or free
      case 0:
        using_low_level_alloc = true;
        block_desc.len = rand() & 0x3fff;
        block_desc.ptr = reinterpret_cast<char *>(
            arena == nullptr
                ? LowLevelAlloc::Alloc(block_desc.len)
                : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
        using_low_level_alloc = false;
        RandomizeBlockDesc(&block_desc);
        rnd = rand();
        it = allocated.find(rnd);
        if (it != allocated.end()) {
          // Key collision: free the old block and keep the new one.
          CheckBlockDesc(it->second);
          using_low_level_alloc = true;
          LowLevelAlloc::Free(it->second.ptr);
          using_low_level_alloc = false;
          it->second = block_desc;
        } else {
          allocated[rnd] = block_desc;
        }
        break;
      case 1:
        // Free an arbitrary live block, if any.
        it = allocated.begin();
        if (it != allocated.end()) {
          CheckBlockDesc(it->second);
          using_low_level_alloc = true;
          LowLevelAlloc::Free(it->second.ptr);
          using_low_level_alloc = false;
          allocated.erase(it);
        }
        break;
    }
  }
  // Drain all remaining blocks so the arena can be deleted.
  while ((it = allocated.begin()) != allocated.end()) {
    CheckBlockDesc(it->second);
    using_low_level_alloc = true;
    LowLevelAlloc::Free(it->second.ptr);
    using_low_level_alloc = false;
    allocated.erase(it);
  }
  if (use_new_arena) {
    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
  }
}
// Runs the stress tests during static initialization, before main(), so
// the allocator is also exercised in the pre-main environment.
static struct BeforeMain {
  BeforeMain() {
    Test(false, false, 50000);
    Test(true, false, 50000);
    Test(true, true, 50000);
  }
} before_main;
}
}
ABSL_NAMESPACE_END
}
// All real testing runs in static initializers (see BeforeMain); reaching
// main() at all means the tests passed.
// Fix: the error message referenced 'TEST_FINSIHED', a typo for the
// TEST_FINISH function actually checked two lines above.
int main(int argc, char *argv[]) {
  printf("PASS\n");
#ifdef __EMSCRIPTEN__
  // Report the exit status to the hosting web page, if any.
  MAIN_THREAD_EM_ASM({
    if (ENVIRONMENT_IS_WEB) {
      if (typeof TEST_FINISH === 'function') {
        TEST_FINISH($0);
      } else {
        console.error('Attempted to exit with status ' + $0);
        console.error('But TEST_FINISH is not a function.');
      }
    }
  }, 0);
#endif
  return 0;
} | void LowLevelAlloc::Free(void *v) {
if (v != nullptr) {
AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
sizeof(f->header));
LowLevelAlloc::Arena *arena = f->header.arena;
ArenaLock section(arena);
AddToFreelist(v, arena);
ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
} | |
#include "absl/strings/internal/cord_rep_btree.h"
#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <ostream>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_consume.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t CordRepBtree::kMaxCapacity;
#endif
namespace {
using NodeStack = CordRepBtree * [CordRepBtree::kMaxDepth];
using EdgeType = CordRepBtree::EdgeType;
using OpResult = CordRepBtree::OpResult;
using CopyResult = CordRepBtree::CopyResult;
constexpr auto kFront = CordRepBtree::kFront;
constexpr auto kBack = CordRepBtree::kBack;
ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
// Recursively prints a human-readable dump of the tree rooted at `rep` to
// `stream`, one line per node, indented two spaces per level.  When
// `include_contents` is set, up to 60 bytes of data-edge payload are shown.
void DumpAll(const CordRep* rep,
             bool include_contents,
             std::ostream& stream,
             size_t depth = 0) {
  // Two extra levels allow for a substring node and its flat/external child.
  assert(depth <= CordRepBtree::kMaxDepth + 2);
  std::string sharing = const_cast<CordRep*>(rep)->refcount.IsOne()
                            ? std::string("Private")
                            : absl::StrCat("Shared(", rep->refcount.Get(), ")");
  std::string sptr = absl::StrCat("0x", absl::Hex(rep));
  auto maybe_dump_data = [&stream, include_contents](const CordRep* r) {
    if (include_contents) {
      // Cap payload display so a line still fits a typical terminal.
      constexpr size_t kMaxDataLength = 60;
      stream << ", data = \""
             << EdgeData(r).substr(0, kMaxDataLength)
             << (r->length > kMaxDataLength ? "\"..." : "\"");
    }
    stream << '\n';
  };
  stream << std::string(depth * 2, ' ') << sharing << " (" << sptr << ") ";
  if (rep->IsBtree()) {
    const CordRepBtree* node = rep->btree();
    std::string label =
        node->height() ? absl::StrCat("Node(", node->height(), ")") : "Leaf";
    stream << label << ", len = " << node->length
           << ", begin = " << node->begin() << ", end = " << node->end()
           << "\n";
    for (CordRep* edge : node->Edges()) {
      DumpAll(edge, include_contents, stream, depth + 1);
    }
  } else if (rep->tag == SUBSTRING) {
    const CordRepSubstring* substring = rep->substring();
    stream << "Substring, len = " << rep->length
           << ", start = " << substring->start;
    maybe_dump_data(rep);
    DumpAll(substring->child, include_contents, stream, depth + 1);
  } else if (rep->tag >= FLAT) {
    stream << "Flat, len = " << rep->length
           << ", cap = " << rep->flat()->Capacity();
    maybe_dump_data(rep);
  } else if (rep->tag == EXTERNAL) {
    stream << "Extn, len = " << rep->length;
    maybe_dump_data(rep);
  }
}
// Creates a CordRepSubstring over [offset, offset + n) of `rep`, consuming
// the caller's reference on `rep`.  Substring-of-substring is flattened:
// the result points directly at the underlying flat/external node with a
// combined offset.
CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
  assert(n != 0);
  assert(offset + n <= rep->length);
  // A full-range substring should have been returned as `rep` itself.
  assert(offset != 0 || n != rep->length);
  if (rep->tag == SUBSTRING) {
    CordRepSubstring* substring = rep->substring();
    offset += substring->start;
    rep = CordRep::Ref(substring->child);
    CordRep::Unref(substring);
  }
  assert(rep->IsExternal() || rep->IsFlat());
  CordRepSubstring* substring = new CordRepSubstring();
  substring->length = n;
  substring->tag = SUBSTRING;
  substring->start = offset;
  substring->child = rep;
  return substring;
}
// Like CreateSubstring, but returns `rep` unchanged for a full-range
// request and nullptr (unreffing `rep`) for an empty one.
inline CordRep* MakeSubstring(CordRep* rep, size_t offset, size_t n) {
  if (n == rep->length) return rep;
  if (n == 0) return CordRep::Unref(rep), nullptr;
  return CreateSubstring(rep, offset, n);
}
// Substring of `rep` from `offset` to the end; `rep` itself if offset == 0.
inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
  if (offset == 0) return rep;
  return CreateSubstring(rep, offset, rep->length - offset);
}
// Returns an edge spanning the first `length` bytes of data edge `edge`,
// consuming the caller's reference.  Privately owned (`is_mutable`) flat
// and substring edges are truncated in place; otherwise a new substring
// wrapper is created.
CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
  assert(length > 0);
  assert(length <= edge->length);
  assert(IsDataEdge(edge));
  if (length >= edge->length) return edge;
  if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
    edge->length = length;
    return edge;
  }
  return CreateSubstring(edge, 0, length);
}
// Drops `n` bytes from one side of `s`: the front when consuming toward
// kBack, the back when consuming toward kFront.
template <EdgeType edge_type>
inline absl::string_view Consume(absl::string_view s, size_t n) {
  return edge_type == kBack ? s.substr(n) : s.substr(0, s.size() - n);
}
// Copies the `n` consumed bytes into `dst` and returns the remainder of
// `s`, consuming from the side selected by `edge_type`.
template <EdgeType edge_type>
inline absl::string_view Consume(char* dst, absl::string_view s, size_t n) {
  if (edge_type == kBack) {
    memcpy(dst, s.data(), n);
    return s.substr(n);
  } else {
    const size_t offset = s.size() - n;
    memcpy(dst, s.data() + offset, n);
    return s.substr(0, offset);
  }
}
// Unrefs `r`, invoking `fn(r)` to destroy it when the count reaches zero.
// Checks IsOne() first to skip the atomic decrement in the common
// unshared case.
template <typename R, typename Fn>
inline void FastUnref(R* r, Fn&& fn) {
  if (r->refcount.IsOne()) {
    fn(r);
  } else if (!r->refcount.DecrementExpectHighRefcount()) {
    fn(r);
  }
}
// Destroys a substring node, unreffing (and deleting if last) its child,
// which is always a flat or external node (see CreateSubstring flattening).
void DeleteSubstring(CordRepSubstring* substring) {
  CordRep* rep = substring->child;
  if (!rep->refcount.Decrement()) {
    if (rep->tag >= FLAT) {
      CordRepFlat::Delete(rep->flat());
    } else {
      assert(rep->tag == EXTERNAL);
      CordRepExternal::Delete(rep->external());
    }
  }
  delete substring;
}
// Destroys the data edge `rep`, which must be a flat, external, or
// substring-of-(flat|external) rep (i.e. IsDataEdge() holds).
void DeleteLeafEdge(CordRep* rep) {
  assert(IsDataEdge(rep));
  if (rep->tag >= FLAT) {
    CordRepFlat::Delete(rep->flat());
  } else if (rep->tag == EXTERNAL) {
    CordRepExternal::Delete(rep->external());
  } else {
    DeleteSubstring(rep->substring());
  }
}
// Records the path of nodes walked from a btree root towards its
// `edge_type` (front or back) edge. Add/Merge operations use it to mutate
// privately owned nodes in place and copy-on-write only the shared suffix
// of the path. `share_depth` is the depth of the first shared
// (refcount > 1) node on the path; everything above it may be mutated.
template <EdgeType edge_type>
struct StackOperations {
  // True if the node at `depth` is privately owned and safe to mutate.
  inline bool owned(int depth) const { return depth < share_depth; }
  // Returns the node recorded at `depth`.
  inline CordRepBtree* node(int depth) const { return stack[depth]; }
  // Descends `depth` levels from `tree` along the `edge_type` edge, filling
  // `stack` and computing `share_depth`. Returns the node reached at
  // `depth` (the leaf when depth == tree->height()).
  inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
    assert(depth <= tree->height());
    int current_depth = 0;
    // Walk the privately owned prefix of the path first...
    while (current_depth < depth && tree->refcount.IsOne()) {
      stack[current_depth++] = tree;
      tree = tree->Edge(edge_type)->btree();
    }
    share_depth = current_depth + (tree->refcount.IsOne() ? 1 : 0);
    // ...then record the shared remainder without further refcount checks.
    while (current_depth < depth) {
      stack[current_depth++] = tree;
      tree = tree->Edge(edge_type)->btree();
    }
    return tree;
  }
  // BuildStack() for a tree known to be fully privately owned (asserted at
  // every level), e.g. a tree the current operation itself just built.
  inline void BuildOwnedStack(CordRepBtree* tree, int height) {
    assert(height <= CordRepBtree::kMaxHeight);
    int depth = 0;
    while (depth < height) {
      assert(tree->refcount.IsOne());
      stack[depth++] = tree;
      tree = tree->Edge(edge_type)->btree();
    }
    assert(tree->refcount.IsOne());
    share_depth = depth + 1;
  }
  // Applies the final `result` of an operation to the root `tree`:
  // kPopped grows a new (taller) root holding both trees, kCopied replaces
  // the original root (unreffing it), and kSelf keeps the mutated root.
  static inline CordRepBtree* Finalize(CordRepBtree* tree, OpResult result) {
    switch (result.action) {
      case CordRepBtree::kPopped:
        tree = edge_type == kBack ? CordRepBtree::New(tree, result.tree)
                                  : CordRepBtree::New(result.tree, tree);
        if (ABSL_PREDICT_FALSE(tree->height() > CordRepBtree::kMaxHeight)) {
          // Height limit exceeded: rebuild into a denser tree.
          tree = CordRepBtree::Rebuild(tree);
          ABSL_RAW_CHECK(tree->height() <= CordRepBtree::kMaxHeight,
                         "Max height exceeded");
        }
        return tree;
      case CordRepBtree::kCopied:
        CordRep::Unref(tree);
        ABSL_FALLTHROUGH_INTENDED;
      case CordRepBtree::kSelf:
        return result.tree;
    }
    ABSL_UNREACHABLE();
    return result.tree;
  }
  // Unwinds the recorded path from `depth` back to the root, folding
  // `result` into each ancestor and adding `length` to ancestor lengths.
  // With `propagate` set, copied nodes are written back into `stack` so the
  // caller can continue working down the updated path.
  template <bool propagate = false>
  inline CordRepBtree* Unwind(CordRepBtree* tree, int depth, size_t length,
                              OpResult result) {
    if (depth != 0) {
      do {
        CordRepBtree* node = stack[--depth];
        const bool owned = depth < share_depth;
        switch (result.action) {
          case CordRepBtree::kPopped:
            assert(!propagate);
            result = node->AddEdge<edge_type>(owned, result.tree, length);
            break;
          case CordRepBtree::kCopied:
            result = node->SetEdge<edge_type>(owned, result.tree, length);
            if (propagate) stack[depth] = result.tree;
            break;
          case CordRepBtree::kSelf:
            // Child updated in place: only lengths change from here up.
            node->length += length;
            while (depth > 0) {
              node = stack[--depth];
              node->length += length;
            }
            return node;
        }
      } while (depth > 0);
    }
    return Finalize(tree, result);
  }
  // Unwind() with stack write-back enabled; see Unwind().
  inline CordRepBtree* Propagate(CordRepBtree* tree, int depth, size_t length,
                                 OpResult result) {
    return Unwind<true>(tree, depth, length, result);
  }
  // Depth of the first shared node on the path; see owned().
  int share_depth;
  NodeStack stack;
};
}
// Enables or disables exhaustive (deep, always-recursing) btree validation;
// read back via IsCordBtreeExhaustiveValidationEnabled(). The flag uses
// relaxed ordering: it is a best-effort debug toggle, not a synchronization
// point. (Fixes the previously misspelled parameter name
// `do_exaustive_validation`; parameter names are not part of the ABI.)
void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation) {
  cord_btree_exhaustive_validation.store(do_exhaustive_validation,
                                         std::memory_order_relaxed);
}
// Returns true if exhaustive validation was requested via
// SetCordBtreeExhaustiveValidation(); relaxed load matches the store.
bool IsCordBtreeExhaustiveValidationEnabled() {
  return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
}
// Dumps a human-readable description of `rep` to `stream`, preceded by a
// banner and an optional `label` section; prints "NULL" for a null rep.
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
                        bool include_contents, std::ostream& stream) {
  stream << "===================================\n";
  if (!label.empty()) {
    stream << label << '\n' << "-----------------------------------\n";
  }
  if (rep == nullptr) {
    stream << "NULL\n";
  } else {
    DumpAll(rep, include_contents, stream);
  }
}
// Convenience overload: dump with a label but without edge contents.
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
                        std::ostream& stream) {
  Dump(rep, label, false, stream);
}
// Convenience overload: dump without a label and without edge contents.
void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
  Dump(rep, absl::string_view(), false, stream);
}
// Destroys `tree` and (recursively) any unshared descendants, unrolling the
// first two levels by hand. `size` selects the child kind two levels down:
// 1 means data edges (leaf children), anything else means deeper subtrees.
template <size_t size>
static void DestroyTree(CordRepBtree* tree) {
  for (CordRep* node : tree->Edges()) {
    // Decrement() returning true means the child is still referenced
    // elsewhere and must be left alive.
    if (node->refcount.Decrement()) continue;
    for (CordRep* edge : node->btree()->Edges()) {
      if (edge->refcount.Decrement()) continue;
      if (size == 1) {
        DeleteLeafEdge(edge);
      } else {
        CordRepBtree::Destroy(edge->btree());
      }
    }
    CordRepBtree::Delete(node->btree());
  }
  CordRepBtree::Delete(tree);
}
// Destroys `tree`, dispatching by height: leaves delete their data edges
// directly, taller trees go through the partially unrolled DestroyTree<>().
void CordRepBtree::Destroy(CordRepBtree* tree) {
  switch (tree->height()) {
    case 0:
      for (CordRep* edge : tree->Edges()) {
        if (!edge->refcount.Decrement()) {
          DeleteLeafEdge(edge);
        }
      }
      return CordRepBtree::Delete(tree);
    case 1:
      return DestroyTree<1>(tree);
    default:
      return DestroyTree<2>(tree);
  }
}
// Validates structural invariants of `tree`, logging and returning false on
// the first violation. With `shallow` set, children are not recursed into
// unless exhaustive validation is globally enabled.
bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
#define NODE_CHECK_VALID(x)                                           \
  if (!(x)) {                                                         \
    ABSL_RAW_LOG(ERROR, "CordRepBtree::CheckValid() FAILED: %s", #x); \
    return false;                                                     \
  }
#define NODE_CHECK_EQ(x, y)                                                  \
  if ((x) != (y)) {                                                          \
    ABSL_RAW_LOG(ERROR,                                                      \
                 "CordRepBtree::CheckValid() FAILED: %s != %s (%s vs %s)", #x, \
                 #y, absl::StrCat(x).c_str(), absl::StrCat(y).c_str());      \
    return false;                                                            \
  }
  // Basic node invariants: tag, height, and edge index ranges.
  NODE_CHECK_VALID(tree != nullptr);
  NODE_CHECK_VALID(tree->IsBtree());
  NODE_CHECK_VALID(tree->height() <= kMaxHeight);
  NODE_CHECK_VALID(tree->begin() < tree->capacity());
  NODE_CHECK_VALID(tree->end() <= tree->capacity());
  NODE_CHECK_VALID(tree->begin() <= tree->end());
  size_t child_length = 0;
  for (CordRep* edge : tree->Edges()) {
    NODE_CHECK_VALID(edge != nullptr);
    if (tree->height() > 0) {
      // Inner node: all children are btrees exactly one level shorter.
      NODE_CHECK_VALID(edge->IsBtree());
      NODE_CHECK_VALID(edge->btree()->height() == tree->height() - 1);
    } else {
      // Leaf: all children are data edges.
      NODE_CHECK_VALID(IsDataEdge(edge));
    }
    child_length += edge->length;
  }
  // A node's length must equal the sum of its children's lengths.
  NODE_CHECK_EQ(child_length, tree->length);
  if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
      tree->height() > 0) {
    for (CordRep* edge : tree->Edges()) {
      if (!IsValid(edge->btree(), shallow)) return false;
    }
  }
  return true;
#undef NODE_CHECK_VALID
#undef NODE_CHECK_EQ
}
#ifndef NDEBUG
// Debug-only: dumps the tree and aborts if validation fails, else returns
// `tree` unchanged so calls can be chained inline.
CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree, bool shallow) {
  if (!IsValid(tree, shallow)) {
    Dump(tree, "CordRepBtree validation failed:", false, std::cout);
    ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
  }
  return tree;
}
// Const overload of the debug-only validation helper above.
const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
                                              bool shallow) {
  if (!IsValid(tree, shallow)) {
    Dump(tree, "CordRepBtree validation failed:", false, std::cout);
    ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
  }
  return tree;
}
#endif
// Adds `edge` at the `edge_type` end of this node, growing the node length
// by `delta`. A full node pops a brand-new node holding only `edge`
// (kPopped) for the caller to splice in; otherwise the edge is added to
// `this` or a copy as decided by ToOpResult(owned).
template <EdgeType edge_type>
inline OpResult CordRepBtree::AddEdge(bool owned, CordRep* edge, size_t delta) {
  if (size() >= kMaxCapacity) return {New(edge), kPopped};
  OpResult result = ToOpResult(owned);
  result.tree->Add<edge_type>(edge);
  result.tree->length += delta;
  return result;
}
// Replaces the `edge_type` edge of this node with `edge`, adding `delta` to
// the node's length. Owned nodes are mutated in place (old edge unreffed);
// shared nodes are copied, reffing every edge that carries over. The
// kFront shift of +1 skips the replaced front edge; kBack's range excludes
// the back edge naturally.
template <EdgeType edge_type>
OpResult CordRepBtree::SetEdge(bool owned, CordRep* edge, size_t delta) {
  OpResult result;
  const size_t idx = index(edge_type);
  if (owned) {
    result = {this, kSelf};
    CordRep::Unref(edges_[idx]);
  } else {
    // Copy the node and ref all retained edges; the replaced edge's slot is
    // overwritten with `edge` below without gaining a ref.
    result = {CopyRaw(length), kCopied};
    constexpr int shift = edge_type == kFront ? 1 : 0;
    for (CordRep* r : Edges(begin() + shift, back() + shift)) {
      CordRep::Ref(r);
    }
  }
  result.tree->edges_[idx] = edge;
  result.tree->length += delta;
  return result;
}
// Adds data edge `rep` to the `edge_type` end of `tree`: descends to the
// leaf, adds the edge there, then unwinds the change up to the root.
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::AddCordRep(CordRepBtree* tree, CordRep* rep) {
  const int depth = tree->height();
  const size_t length = rep->length;
  StackOperations<edge_type> ops;
  CordRepBtree* leaf = ops.BuildStack(tree, depth);
  const OpResult result =
      leaf->AddEdge<edge_type>(ops.owned(depth), rep, length);
  return ops.Unwind(tree, depth, length, result);
}
// Creates a new leaf filled front-to-back with as much of `data` as fits.
// Each flat is allocated with `extra` additional capacity requested for
// future growth; leftover data (if any) is the caller's to handle.
template <>
CordRepBtree* CordRepBtree::NewLeaf<kBack>(absl::string_view data,
                                           size_t extra) {
  CordRepBtree* leaf = CordRepBtree::New(0);
  size_t length = 0;
  size_t end = 0;
  const size_t cap = leaf->capacity();
  while (!data.empty() && end != cap) {
    auto* flat = CordRepFlat::New(data.length() + extra);
    flat->length = (std::min)(data.length(), flat->Capacity());
    length += flat->length;
    leaf->edges_[end++] = flat;
    data = Consume<kBack>(flat->Data(), data, flat->length);
  }
  leaf->length = length;
  leaf->set_end(end);
  return leaf;
}
// Creates a new leaf filled back-to-front with as much of `data` as fits,
// consuming `data` from its tail. Mirror image of the kBack specialization.
template <>
CordRepBtree* CordRepBtree::NewLeaf<kFront>(absl::string_view data,
                                            size_t extra) {
  CordRepBtree* leaf = CordRepBtree::New(0);
  size_t length = 0;
  size_t begin = leaf->capacity();
  leaf->set_end(leaf->capacity());
  while (!data.empty() && begin != 0) {
    auto* flat = CordRepFlat::New(data.length() + extra);
    flat->length = (std::min)(data.length(), flat->Capacity());
    length += flat->length;
    leaf->edges_[--begin] = flat;
    data = Consume<kFront>(flat->Data(), data, flat->length);
  }
  leaf->length = length;
  leaf->set_begin(begin);
  return leaf;
}
// Appends flats holding `data` to this (non-full, privately owned) leaf
// until the data or the leaf capacity runs out; returns the unconsumed
// remainder. Note: the caller is responsible for updating `length`.
template <>
absl::string_view CordRepBtree::AddData<kBack>(absl::string_view data,
                                               size_t extra) {
  assert(!data.empty());
  assert(size() < capacity());
  // Shift existing edges to the start to maximize room at the back.
  AlignBegin();
  const size_t cap = capacity();
  do {
    CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
    const size_t n = (std::min)(data.length(), flat->Capacity());
    flat->length = n;
    edges_[fetch_add_end(1)] = flat;
    data = Consume<kBack>(flat->Data(), data, n);
  } while (!data.empty() && end() != cap);
  return data;
}
// Prepends flats holding the tail of `data` to this (non-full, privately
// owned) leaf; returns the unconsumed remainder. Mirror of AddData<kBack>.
template <>
absl::string_view CordRepBtree::AddData<kFront>(absl::string_view data,
                                                size_t extra) {
  assert(!data.empty());
  assert(size() < capacity());
  // Shift existing edges to the end to maximize room at the front.
  AlignEnd();
  do {
    CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
    const size_t n = (std::min)(data.length(), flat->Capacity());
    flat->length = n;
    edges_[sub_fetch_begin(1)] = flat;
    data = Consume<kFront>(flat->Data(), data, n);
  } while (!data.empty() && begin() != 0);
  return data;
}
// Adds `data` to the `edge_type` end of `tree`: first tops up the existing
// edge-most leaf (copy-on-write as needed), then keeps attaching brand-new
// full leaves until all data is consumed.
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::AddData(CordRepBtree* tree, absl::string_view data,
                                    size_t extra) {
  if (ABSL_PREDICT_FALSE(data.empty())) return tree;
  const size_t original_data_size = data.size();
  int depth = tree->height();
  StackOperations<edge_type> ops;
  CordRepBtree* leaf = ops.BuildStack(tree, depth);
  if (leaf->size() < leaf->capacity()) {
    OpResult result = leaf->ToOpResult(ops.owned(depth));
    data = result.tree->AddData<edge_type>(data, extra);
    if (data.empty()) {
      // Everything fit in the existing leaf; just unwind the change.
      result.tree->length += original_data_size;
      return ops.Unwind(tree, depth, original_data_size, result);
    }
    // Partially consumed: propagate the partial result so the stack holds
    // the updated (now privately owned) path for the loop below.
    size_t delta = original_data_size - data.size();
    assert(delta > 0);
    result.tree->length += delta;
    tree = ops.Propagate(tree, depth, delta, result);
    ops.share_depth = depth + 1;
  }
  // Attach new leaves (kPopped) until the remaining data is consumed. Each
  // unwind may grow the tree, so rebuild the (fully owned) stack each pass.
  for (;;) {
    OpResult result = {CordRepBtree::NewLeaf<edge_type>(data, extra), kPopped};
    if (result.tree->length == data.size()) {
      return ops.Unwind(tree, depth, result.tree->length, result);
    }
    data = Consume<edge_type>(data, result.tree->length);
    tree = ops.Unwind(tree, depth, result.tree->length, result);
    depth = tree->height();
    ops.BuildOwnedStack(tree, depth);
  }
}
// Merges `src` into the `edge_type` side of the taller (or equal-height)
// `dst`. Descends `dst` to the node at src's height; if that node can
// absorb all of src's edges they are moved/copied in, otherwise `src`
// itself is spliced in as a popped edge.
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::Merge(CordRepBtree* dst, CordRepBtree* src) {
  assert(dst->height() >= src->height());
  const size_t length = src->length;
  const int depth = dst->height() - src->height();
  StackOperations<edge_type> ops;
  CordRepBtree* merge_node = ops.BuildStack(dst, depth);
  OpResult result;
  if (merge_node->size() + src->size() <= kMaxCapacity) {
    // Absorb src's edges directly into the merge node (or its copy).
    result = merge_node->ToOpResult(ops.owned(depth));
    result.tree->Add<edge_type>(src->Edges());
    result.tree->length += src->length;
    if (src->refcount.IsOne()) {
      // Private src: edges were moved, delete the now-empty shell.
      Delete(src);
    } else {
      // Shared src: the edges were copied, so they need an extra ref.
      for (CordRep* edge : src->Edges()) CordRep::Ref(edge);
      CordRepBtree::Unref(src);
    }
  } else {
    result = {src, kPopped};
  }
  if (depth) {
    return ops.Unwind(dst, depth, length, result);
  }
  return ops.Finalize(dst, result);
}
// Returns a copy of the suffix of this tree starting at `offset`. The
// result's `height` may be lower than this tree's if the suffix collapses
// into a single back edge; -1 means the result is a plain data edge.
CopyResult CordRepBtree::CopySuffix(size_t offset) {
  assert(offset < this->length);
  // Fold down: as long as the suffix fits entirely inside the back edge,
  // descend into it, reducing the result height.
  int height = this->height();
  CordRepBtree* node = this;
  size_t len = node->length - offset;
  CordRep* back = node->Edge(kBack);
  while (back->length >= len) {
    offset = back->length - len;
    if (--height < 0) {
      return {MakeSubstring(CordRep::Ref(back), offset), height};
    }
    node = back->btree();
    back = node->Edge(kBack);
  }
  if (offset == 0) return {CordRep::Ref(node), height};
  // Copy the spine: at each level copy the tail edges, then descend into
  // the partially covered edge (pos.n bytes of it remain to be copied).
  Position pos = node->IndexBeyond(offset);
  CordRepBtree* sub = node->CopyToEndFrom(pos.index, len);
  const CopyResult result = {sub, height};
  while (pos.n != 0) {
    assert(pos.index >= 1);
    const size_t begin = pos.index - 1;
    sub->set_begin(begin);
    CordRep* const edge = node->Edge(begin);
    len = pos.n;
    offset = edge->length - len;
    if (--height < 0) {
      // Bottom: wrap the partially covered data edge in a substring.
      sub->edges_[begin] = MakeSubstring(CordRep::Ref(edge), offset, len);
      return result;
    }
    node = edge->btree();
    pos = node->IndexBeyond(offset);
    CordRepBtree* nsub = node->CopyToEndFrom(pos.index, len);
    sub->edges_[begin] = nsub;
    sub = nsub;
  }
  sub->set_begin(pos.index);
  return result;
}
// Returns a copy of the first `n` bytes of this tree. With `allow_folding`
// the result may collapse into a front edge of lower height (-1 meaning a
// plain data edge). Mirror image of CopySuffix().
CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
  assert(n > 0);
  assert(n <= this->length);
  int height = this->height();
  CordRepBtree* node = this;
  CordRep* front = node->Edge(kFront);
  if (allow_folding) {
    // Fold down while the prefix fits entirely inside the front edge.
    while (front->length >= n) {
      if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1};
      node = front->btree();
      front = node->Edge(kFront);
    }
  }
  if (node->length == n) return {CordRep::Ref(node), height};
  // Copy the spine: at each level copy the leading edges, then descend into
  // the partially covered edge (pos.n bytes of it still needed).
  Position pos = node->IndexOf(n);
  CordRepBtree* sub = node->CopyBeginTo(pos.index, n);
  const CopyResult result = {sub, height};
  while (pos.n != 0) {
    size_t end = pos.index;
    n = pos.n;
    CordRep* edge = node->Edge(pos.index);
    if (--height < 0) {
      // Bottom: wrap the partially covered data edge in a substring.
      sub->edges_[end++] = MakeSubstring(CordRep::Ref(edge), 0, n);
      sub->set_end(end);
      AssertValid(result.edge->btree());
      return result;
    }
    node = edge->btree();
    pos = node->IndexOf(n);
    CordRepBtree* nsub = node->CopyBeginTo(pos.index, n);
    sub->edges_[end++] = nsub;
    sub->set_end(end);
    sub = nsub;
  }
  sub->set_end(pos.index);
  AssertValid(result.edge->btree());
  return result;
}
// Extracts the front edge of `tree`, consuming `tree`. For a private tree
// the front edge's ref is transferred to the caller and the rest of the
// tree is released; for a shared tree the edge gains a ref instead.
CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
  CordRep* front = tree->Edge(tree->begin());
  if (tree->refcount.IsOne()) {
    Unref(tree->Edges(tree->begin() + 1, tree->end()));
    CordRepBtree::Delete(tree);
  } else {
    CordRep::Ref(front);
    CordRep::Unref(tree);
  }
  return front;
}
// Consumes `tree` and returns a tree holding only its edges in
// [begin, end), with length set to `new_length`. Private trees are trimmed
// in place; shared trees are copied and the original unreffed.
CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
                                           size_t new_length) {
  assert(end <= tree->end());
  if (tree->refcount.IsOne()) {
    Unref(tree->Edges(end, tree->end()));
    tree->set_end(end);
    tree->length = new_length;
  } else {
    CordRepBtree* old = tree;
    tree = tree->CopyBeginTo(end, new_length);
    CordRep::Unref(old);
  }
  return tree;
}
// Removes the last `n` bytes from `tree`, consuming it. Returns nullptr
// when everything is removed, a (possibly non-btree) data edge when the
// result collapses, or a trimmed tree otherwise.
CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
  assert(tree != nullptr);
  assert(n <= tree->length);
  const size_t len = tree->length;
  if (ABSL_PREDICT_FALSE(n == 0)) {
    return tree;
  }
  if (ABSL_PREDICT_FALSE(n >= len)) {
    CordRepBtree::Unref(tree);
    return nullptr;
  }
  size_t length = len - n;
  int height = tree->height();
  bool is_mutable = tree->refcount.IsOne();
  // While the retained prefix lives entirely in the first edge, peel off
  // levels; mutability must hold along the whole extracted chain.
  Position pos = tree->IndexOfLength(length);
  while (pos.index == tree->begin()) {
    CordRep* edge = ExtractFront(tree);
    is_mutable &= edge->refcount.IsOne();
    if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
    tree = edge->btree();
    pos = tree->IndexOfLength(length);
  }
  // Trim the top node, then walk down trimming the boundary edge at each
  // level until the retained length lines up with an edge boundary.
  CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length);
  CordRep* edge = tree->Edge(pos.index);
  length = pos.n;
  while (length != edge->length) {
    assert(tree->refcount.IsOne());
    const bool edge_is_mutable = edge->refcount.IsOne();
    if (height-- == 0) {
      tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
      return AssertValid(top);
    }
    if (!edge_is_mutable) {
      // Shared subtree: take a prefix copy and stop descending.
      tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge;
      CordRep::Unref(edge);
      return AssertValid(top);
    }
    tree = edge->btree();
    pos = tree->IndexOfLength(length);
    tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length);
    edge = tree->Edge(pos.index);
    length = pos.n;
  }
  return AssertValid(top);
}
CordRep* CordRepBtree::SubTree(size_t offset, size_t n) {
assert(n <= this->length);
assert(offs | #include "absl/strings/internal/cord_rep_btree.h"
#include <cmath>
#include <deque>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/cleanup/cleanup.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
// Test-only backdoor granting the test access to CordRepBtree private
// members (edge slots and the end index).
class CordRepBtreeTestPeer {
 public:
  // Overwrites the edge at `idx` without any refcount bookkeeping.
  static void SetEdge(CordRepBtree* node, size_t idx, CordRep* edge) {
    node->edges_[idx] = edge;
  }
  // Appends `edge` at the back without any refcount or length bookkeeping.
  static void AddEdge(CordRepBtree* node, CordRep* edge) {
    node->edges_[node->fetch_add_end(1)] = edge;
  }
};
namespace {
using ::absl::cordrep_testing::AutoUnref;
using ::absl::cordrep_testing::CordCollectRepsIf;
using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CordVisitReps;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
using ::absl::cordrep_testing::MakeExternal;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::MakeSubstring;
using ::testing::_;
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Conditional;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Le;
using ::testing::Ne;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::TypedEq;
// Matches a CordRep* that is a flat holding exactly `data`.
MATCHER_P(EqFlatHolding, data, "Equals flat holding data") {
  if (arg->tag < FLAT) {
    *result_listener << "Expected FLAT, got tag " << static_cast<int>(arg->tag);
    return false;
  }
  std::string actual = CordToString(arg);
  if (actual != data) {
    *result_listener << "Expected flat holding \"" << data
                     << "\", got flat holding \"" << actual << "\"";
    return false;
  }
  return true;
}
// Matches a CordRep* that is a structurally valid btree node of `height`.
MATCHER_P(IsNode, height, absl::StrCat("Is a valid node of height ", height)) {
  if (arg == nullptr) {
    *result_listener << "Expected NODE, got nullptr";
    return false;
  }
  if (arg->tag != BTREE) {
    *result_listener << "Expected NODE, got " << static_cast<int>(arg->tag);
    return false;
  }
  if (!CordRepBtree::IsValid(arg->btree())) {
    // Dump the invalid tree into the failure message for diagnosis.
    CordRepBtree::Dump(arg->btree(), "Expected valid NODE, got:", false,
                       *result_listener->stream());
    return false;
  }
  if (arg->btree()->height() != height) {
    *result_listener << "Expected NODE of height " << height << ", got "
                     << arg->btree()->height();
    return false;
  }
  return true;
}
// Matches a CordRep* that is a substring with the given `start` and
// `length`.
MATCHER_P2(IsSubstring, start, length,
           absl::StrCat("Is a substring(start = ", start, ", length = ", length,
                        ")")) {
  if (arg == nullptr) {
    *result_listener << "Expected substring, got nullptr";
    return false;
  }
  if (arg->tag != SUBSTRING) {
    *result_listener << "Expected SUBSTRING, got "
                     << static_cast<int>(arg->tag);
    return false;
  }
  const CordRepSubstring* const substr = arg->substring();
  if (substr->start != start || substr->length != length) {
    *result_listener << "Expected substring(" << start << ", " << length
                     << "), got substring(" << substr->start << ", "
                     << substr->length << ")";
    return false;
  }
  return true;
}
// Matches an ExtractResult whose `tree` and `extracted` pointers equal the
// expected values exactly.
MATCHER_P2(EqExtractResult, tree, rep, "Equals ExtractResult") {
  if (arg.tree != tree || arg.extracted != rep) {
    *result_listener << "Expected {" << static_cast<const void*>(tree) << ", "
                     << static_cast<const void*>(rep) << "}, got {" << arg.tree
                     << ", " << arg.extracted << "}";
    return false;
  }
  return true;
}
// Test helper that hands out successive chunks of `data`, either from the
// front (forward) or from the back, and can report everything consumed so
// far as a single contiguous view.
class DataConsumer {
 public:
  // `data` must outlive this instance (only a view is stored).
  DataConsumer(absl::string_view data, bool forward)
      : data_(data), forward_(forward) {}
  // Returns the next `n` bytes; forward mode reads left-to-right, backward
  // mode returns chunks walking right-to-left.
  absl::string_view Next(size_t n) {
    assert(n <= data_.size() - consumed_);
    consumed_ += n;
    return data_.substr(forward_ ? consumed_ - n : data_.size() - consumed_, n);
  }
  // Returns the contiguous region consumed so far (prefix in forward mode,
  // suffix in backward mode).
  absl::string_view Consumed() const {
    return forward_ ? data_.substr(0, consumed_)
                    : data_.substr(data_.size() - consumed_);
  }

 private:
  absl::string_view data_;   // full input (not owned)
  size_t consumed_ = 0;      // total bytes handed out by Next()
  bool forward_;             // consumption direction
};
// Adds `data` to `node` on the side selected by `append`, returning the
// resulting tree.
CordRepBtree* BtreeAdd(CordRepBtree* node, bool append,
                       absl::string_view data) {
  if (append) {
    return CordRepBtree::Append(node, data);
  }
  return CordRepBtree::Prepend(node, data);
}
// Recursively collects all data edges (leaf children) of `tree` into
// `edges`, in left-to-right order.
void GetLeafEdges(const CordRepBtree* tree, std::vector<CordRep*>& edges) {
  const bool is_leaf = tree->height() == 0;
  for (CordRep* edge : tree->Edges()) {
    if (is_leaf) {
      edges.push_back(edge);
    } else {
      GetLeafEdges(edge->btree(), edges);
    }
  }
}
// Convenience overload returning the collected leaf edges by value.
std::vector<CordRep*> GetLeafEdges(const CordRepBtree* tree) {
  std::vector<CordRep*> result;
  GetLeafEdges(tree, result);
  return result;
}
// Creates a flat holding the zero-padded hex label "0xNNNN" for `i`, giving
// each test edge a unique, readable payload.
CordRepFlat* MakeHexFlat(size_t i) {
  return MakeFlat(absl::StrCat("0x", absl::Hex(i, absl::kZeroPad4)));
}
// Builds a single leaf holding `size` hex flats (defaults to a full leaf).
CordRepBtree* MakeLeaf(size_t size = CordRepBtree::kMaxCapacity) {
  assert(size <= CordRepBtree::kMaxCapacity);
  CordRepBtree* node = CordRepBtree::Create(MakeHexFlat(0));
  size_t index = 1;
  while (index < size) {
    node = CordRepBtree::Append(node, MakeHexFlat(index));
    ++index;
  }
  return node;
}
// Builds a tree of `size` hex flats, added via Append or Prepend.
CordRepBtree* MakeTree(size_t size, bool append = true) {
  CordRepBtree* node = CordRepBtree::Create(MakeHexFlat(0));
  for (size_t i = 1; i < size; ++i) {
    CordRepFlat* flat = MakeHexFlat(i);
    if (append) {
      node = CordRepBtree::Append(node, flat);
    } else {
      node = CordRepBtree::Prepend(node, flat);
    }
  }
  return node;
}
// Builds a tree from `reps` (must be non-empty) by appending in order.
CordRepBtree* CreateTree(absl::Span<CordRep* const> reps) {
  CordRepBtree* tree = CordRepBtree::Create(reps.front());
  for (size_t i = 1; i < reps.size(); ++i) {
    tree = CordRepBtree::Append(tree, reps[i]);
  }
  return tree;
}
// Builds a tree from `data` split into flats of at most `chunk_size`.
CordRepBtree* CreateTree(absl::string_view data, size_t chunk_size) {
  return CreateTree(CreateFlatsFromString(data, chunk_size));
}
// Builds a tree holding `data` by prepending its chunks in reverse order,
// yielding the same logical content as CreateTree().
CordRepBtree* CreateTreeReverse(absl::string_view data, size_t chunk_size) {
  std::vector<CordRep*> flats = CreateFlatsFromString(data, chunk_size);
  CordRepBtree* tree = CordRepBtree::Create(flats.back());
  for (size_t i = flats.size() - 1; i > 0; --i) {
    tree = CordRepBtree::Prepend(tree, flats[i - 1]);
  }
  return tree;
}
// Fixture parameterized on whether the tree under test holds an extra
// (shared) reference, exercising both in-place and copy-on-write paths.
class CordRepBtreeTest : public testing::TestWithParam<bool> {
 public:
  bool shared() const { return GetParam(); }
  static std::string ToString(testing::TestParamInfo<bool> param) {
    return param.param ? "Shared" : "Private";
  }
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeTest, testing::Bool(),
                         CordRepBtreeTest::ToString);
// Fixture parameterized on tree height (0 .. kMaxHeight - 1).
class CordRepBtreeHeightTest : public testing::TestWithParam<int> {
 public:
  int height() const { return GetParam(); }
  static std::string ToString(testing::TestParamInfo<int> param) {
    return absl::StrCat(param.param);
  }
};
INSTANTIATE_TEST_SUITE_P(WithHeights, CordRepBtreeHeightTest,
                         testing::Range(0, CordRepBtree::kMaxHeight),
                         CordRepBtreeHeightTest::ToString);
using TwoBools = testing::tuple<bool, bool>;
// Fixture parameterized on the shared-ness of two input trees, covering all
// four private/shared combinations for two-tree operations such as merge.
class CordRepBtreeDualTest : public testing::TestWithParam<TwoBools> {
 public:
  bool first_shared() const { return std::get<0>(GetParam()); }
  bool second_shared() const { return std::get<1>(GetParam()); }
  static std::string ToString(testing::TestParamInfo<TwoBools> param) {
    if (std::get<0>(param.param)) {
      return std::get<1>(param.param) ? "BothShared" : "FirstShared";
    }
    return std::get<1>(param.param) ? "SecondShared" : "Private";
  }
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeDualTest,
                         testing::Combine(testing::Bool(), testing::Bool()),
                         CordRepBtreeDualTest::ToString);
// On 64-bit platforms the node size should be a multiple of 64 bytes.
TEST(CordRepBtreeTest, SizeIsMultipleOf64) {
  if (sizeof(size_t) == 8 && sizeof(void*) == 8) {
    EXPECT_THAT(sizeof(CordRepBtree) % 64, Eq(0u))
        << "Should be multiple of 64";
  }
}
// A default-constructed tree is an empty leaf (height 0, no edges).
TEST(CordRepBtreeTest, NewDestroyEmptyTree) {
  auto* tree = CordRepBtree::New();
  EXPECT_THAT(tree->size(), Eq(0u));
  EXPECT_THAT(tree->height(), Eq(0));
  EXPECT_THAT(tree->Edges(), ElementsAre());
  CordRepBtree::Destroy(tree);
}
// New(height) creates an empty node at the requested height.
TEST(CordRepBtreeTest, NewDestroyEmptyTreeAtHeight) {
  auto* tree = CordRepBtree::New(3);
  EXPECT_THAT(tree->size(), Eq(0u));
  EXPECT_THAT(tree->height(), Eq(3));
  EXPECT_THAT(tree->Edges(), ElementsAre());
  CordRepBtree::Destroy(tree);
}
// btree() is an identity cast on btree reps and asserts on other reps in
// debug builds.
TEST(CordRepBtreeTest, Btree) {
  CordRep* rep = CordRepBtree::New();
  EXPECT_THAT(rep->btree(), Eq(rep));
  EXPECT_THAT(static_cast<const CordRep*>(rep)->btree(), Eq(rep));
  CordRep::Unref(rep);
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
  // Calling btree() on a non-btree rep must die in debug builds.
  rep = MakeFlat("Hello world");
  EXPECT_DEATH(rep->btree(), ".*");
  EXPECT_DEATH(static_cast<const CordRep*>(rep)->btree(), ".*");
  CordRep::Unref(rep);
#endif
}
// IsDataEdge()/EdgeData() accept flats, externals, and substrings of
// either, and reject nested substrings.
TEST(CordRepBtreeTest, EdgeData) {
  CordRepFlat* flat = MakeFlat("Hello world");
  CordRepExternal* external = MakeExternal("Hello external");
  CordRep* substr1 = MakeSubstring(1, 6, CordRep::Ref(flat));
  CordRep* substr2 = MakeSubstring(1, 6, CordRep::Ref(external));
  // A substring of a substring is not a valid data edge.
  CordRep* bad_substr = MakeSubstring(1, 2, CordRep::Ref(substr1));
  EXPECT_TRUE(IsDataEdge(flat));
  EXPECT_THAT(EdgeData(flat).data(), TypedEq<const void*>(flat->Data()));
  EXPECT_THAT(EdgeData(flat), Eq("Hello world"));
  EXPECT_TRUE(IsDataEdge(external));
  EXPECT_THAT(EdgeData(external).data(), TypedEq<const void*>(external->base));
  EXPECT_THAT(EdgeData(external), Eq("Hello external"));
  EXPECT_TRUE(IsDataEdge(substr1));
  EXPECT_THAT(EdgeData(substr1).data(), TypedEq<const void*>(flat->Data() + 1));
  EXPECT_THAT(EdgeData(substr1), Eq("ello w"));
  EXPECT_TRUE(IsDataEdge(substr2));
  EXPECT_THAT(EdgeData(substr2).data(),
              TypedEq<const void*>(external->base + 1));
  EXPECT_THAT(EdgeData(substr2), Eq("ello e"));
  EXPECT_FALSE(IsDataEdge(bad_substr));
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
  EXPECT_DEATH(EdgeData(bad_substr), ".*");
#endif
  CordRep::Unref(bad_substr);
  CordRep::Unref(substr2);
  CordRep::Unref(substr1);
  CordRep::Unref(external);
  CordRep::Unref(flat);
}
// Create() wraps a single data edge in a one-edge leaf.
TEST(CordRepBtreeTest, CreateUnrefLeaf) {
  auto* flat = MakeFlat("a");
  auto* leaf = CordRepBtree::Create(flat);
  EXPECT_THAT(leaf->size(), Eq(1u));
  EXPECT_THAT(leaf->height(), Eq(0));
  EXPECT_THAT(leaf->Edges(), ElementsAre(flat));
  CordRepBtree::Unref(leaf);
}
// New(child) creates a height-1 node holding a single leaf edge.
TEST(CordRepBtreeTest, NewUnrefNode) {
  auto* leaf = CordRepBtree::Create(MakeFlat("a"));
  CordRepBtree* tree = CordRepBtree::New(leaf);
  EXPECT_THAT(tree->size(), Eq(1u));
  EXPECT_THAT(tree->height(), Eq(1));
  EXPECT_THAT(tree->Edges(), ElementsAre(leaf));
  CordRepBtree::Unref(tree);
}
// Appending up to capacity keeps a single leaf; a shared leaf must be
// copied (result != leaf), a private leaf is updated in place.
TEST_P(CordRepBtreeTest, AppendToLeafToCapacity) {
  AutoUnref refs;
  std::vector<CordRep*> flats;
  flats.push_back(MakeHexFlat(0));
  auto* leaf = CordRepBtree::Create(flats.back());
  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
    refs.RefIf(shared(), leaf);
    flats.push_back(MakeHexFlat(i));
    auto* result = CordRepBtree::Append(leaf, flats.back());
    EXPECT_THAT(result->height(), Eq(0));
    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
    leaf = result;
  }
  CordRep::Unref(leaf);
}
// Mirror of AppendToLeafToCapacity for Prepend: edges accumulate at the
// front, copy-on-write only when shared.
TEST_P(CordRepBtreeTest, PrependToLeafToCapacity) {
  AutoUnref refs;
  std::deque<CordRep*> flats;
  flats.push_front(MakeHexFlat(0));
  auto* leaf = CordRepBtree::Create(flats.front());
  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
    refs.RefIf(shared(), leaf);
    flats.push_front(MakeHexFlat(i));
    auto* result = CordRepBtree::Prepend(leaf, flats.front());
    EXPECT_THAT(result->height(), Eq(0));
    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
    leaf = result;
  }
  CordRep::Unref(leaf);
}
// Alternating Prepend/Append up to capacity stays a single leaf with the
// expected edge order.
TEST_P(CordRepBtreeTest, AppendPrependToLeafToCapacity) {
  AutoUnref refs;
  std::deque<CordRep*> flats;
  flats.push_front(MakeHexFlat(0));
  auto* leaf = CordRepBtree::Create(flats.front());
  for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
    refs.RefIf(shared(), leaf);
    CordRepBtree* result;
    if (i % 2 != 0) {
      flats.push_front(MakeHexFlat(i));
      result = CordRepBtree::Prepend(leaf, flats.front());
    } else {
      flats.push_back(MakeHexFlat(i));
      result = CordRepBtree::Append(leaf, flats.back());
    }
    EXPECT_THAT(result->height(), Eq(0));
    EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
    EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
    leaf = result;
  }
  CordRep::Unref(leaf);
}
// Appending to a full leaf grows the tree: a new height-1 root holding the
// old leaf and a fresh leaf with the appended flat.
TEST_P(CordRepBtreeTest, AppendToLeafBeyondCapacity) {
  AutoUnref refs;
  auto* leaf = MakeLeaf();
  refs.RefIf(shared(), leaf);
  CordRep* flat = MakeFlat("abc");
  auto* result = CordRepBtree::Append(leaf, flat);
  ASSERT_THAT(result, IsNode(1));
  EXPECT_THAT(result, Ne(leaf));
  absl::Span<CordRep* const> edges = result->Edges();
  ASSERT_THAT(edges, ElementsAre(leaf, IsNode(0)));
  EXPECT_THAT(edges[1]->btree()->Edges(), ElementsAre(flat));
  CordRep::Unref(result);
}
// Mirror of AppendToLeafBeyondCapacity: the new leaf appears at the front.
TEST_P(CordRepBtreeTest, PrependToLeafBeyondCapacity) {
  AutoUnref refs;
  auto* leaf = MakeLeaf();
  refs.RefIf(shared(), leaf);
  CordRep* flat = MakeFlat("abc");
  auto* result = CordRepBtree::Prepend(leaf, flat);
  ASSERT_THAT(result, IsNode(1));
  EXPECT_THAT(result, Ne(leaf));
  absl::Span<CordRep* const> edges = result->Edges();
  ASSERT_THAT(edges, ElementsAre(IsNode(0), leaf));
  EXPECT_THAT(edges[0]->btree()->Edges(), ElementsAre(flat));
  CordRep::Unref(result);
}
// Appending into a height-1 tree preserves leaf order, copying the tree
// only when it (or parts of its back path) is shared.
TEST_P(CordRepBtreeTest, AppendToTreeOneDeep) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  AutoUnref refs;
  std::vector<CordRep*> flats;
  flats.push_back(MakeHexFlat(0));
  CordRepBtree* tree = CordRepBtree::Create(flats.back());
  // Grow past one leaf to reach height 1.
  for (size_t i = 1; i <= max_cap; ++i) {
    flats.push_back(MakeHexFlat(i));
    tree = CordRepBtree::Append(tree, flats.back());
  }
  ASSERT_THAT(tree, IsNode(1));
  for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
    refs.RefIf(shared(), tree);
    // Periodically share the back leaf to exercise partial copy-on-write.
    refs.RefIf(i % 4 == 0, tree->Edges().back());
    flats.push_back(MakeHexFlat(i));
    CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
    ASSERT_THAT(result, IsNode(1));
    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
    std::vector<CordRep*> edges = GetLeafEdges(result);
    ASSERT_THAT(edges, ElementsAreArray(flats));
    tree = result;
  }
  CordRep::Unref(tree);
}
// Same as AppendToTreeOneDeep but at height 2, sharing nodes at multiple
// levels of the back path.
TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  AutoUnref refs;
  std::vector<CordRep*> flats;
  flats.push_back(MakeHexFlat(0));
  CordRepBtree* tree = CordRepBtree::Create(flats.back());
  // Grow to height 2.
  for (size_t i = 1; i <= max_cap * max_cap; ++i) {
    flats.push_back(MakeHexFlat(i));
    tree = CordRepBtree::Append(tree, flats.back());
  }
  ASSERT_THAT(tree, IsNode(2));
  for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
    refs.RefIf(shared(), tree);
    // Periodically share inner and leaf nodes along the back path.
    refs.RefIf(i % 16 == 0, tree->Edges().back());
    refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
    flats.push_back(MakeHexFlat(i));
    CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
    ASSERT_THAT(result, IsNode(2));
    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
    std::vector<CordRep*> edges = GetLeafEdges(result);
    ASSERT_THAT(edges, ElementsAreArray(flats));
    tree = result;
  }
  CordRep::Unref(tree);
}
// Mirror of AppendToTreeOneDeep for Prepend (front path).
TEST_P(CordRepBtreeTest, PrependToTreeOneDeep) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  AutoUnref refs;
  std::deque<CordRep*> flats;
  flats.push_back(MakeHexFlat(0));
  CordRepBtree* tree = CordRepBtree::Create(flats.back());
  // Grow past one leaf to reach height 1.
  for (size_t i = 1; i <= max_cap; ++i) {
    flats.push_front(MakeHexFlat(i));
    tree = CordRepBtree::Prepend(tree, flats.front());
  }
  ASSERT_THAT(tree, IsNode(1));
  for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
    refs.RefIf(shared(), tree);
    refs.RefIf(i % 4 == 0, tree->Edges().back());
    flats.push_front(MakeHexFlat(i));
    CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
    ASSERT_THAT(result, IsNode(1));
    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
    std::vector<CordRep*> edges = GetLeafEdges(result);
    ASSERT_THAT(edges, ElementsAreArray(flats));
    tree = result;
  }
  CordRep::Unref(tree);
}
// Mirror of AppendToTreeTwoDeep for Prepend (front path) at height 2.
TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  AutoUnref refs;
  std::deque<CordRep*> flats;
  flats.push_back(MakeHexFlat(0));
  CordRepBtree* tree = CordRepBtree::Create(flats.back());
  // Grow to height 2.
  for (size_t i = 1; i <= max_cap * max_cap; ++i) {
    flats.push_front(MakeHexFlat(i));
    tree = CordRepBtree::Prepend(tree, flats.front());
  }
  ASSERT_THAT(tree, IsNode(2));
  for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
    refs.RefIf(shared(), tree);
    refs.RefIf(i % 16 == 0, tree->Edges().back());
    refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
    flats.push_front(MakeHexFlat(i));
    CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
    ASSERT_THAT(result, IsNode(2));
    ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
    std::vector<CordRep*> edges = GetLeafEdges(result);
    ASSERT_THAT(edges, ElementsAreArray(flats));
    tree = result;
  }
  CordRep::Unref(tree);
}
// Merging two leaves whose combined size fits one leaf yields a single
// height-0 node holding all edges in order.
TEST_P(CordRepBtreeDualTest, MergeLeafsNotExceedingCapacity) {
  for (bool use_append : {false, true}) {
    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
    AutoUnref refs;
    std::vector<CordRep*> flats;
    CordRepBtree* left = MakeLeaf(3);
    GetLeafEdges(left, flats);
    refs.RefIf(first_shared(), left);
    CordRepBtree* right = MakeLeaf(2);
    GetLeafEdges(right, flats);
    refs.RefIf(second_shared(), right);
    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
                                    : CordRepBtree::Prepend(right, left);
    EXPECT_THAT(tree, IsNode(0));
    EXPECT_THAT(tree->Edges(), ElementsAreArray(flats));
    CordRepBtree::Unref(tree);
  }
}
// Merging two leaves that together exceed one leaf's capacity produces a
// height-1 node holding both original leaves.
TEST_P(CordRepBtreeDualTest, MergeLeafsExceedingCapacity) {
  for (bool use_append : {false, true}) {
    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
    AutoUnref refs;
    CordRepBtree* left = MakeLeaf(CordRepBtree::kMaxCapacity - 2);
    refs.RefIf(first_shared(), left);
    CordRepBtree* right = MakeLeaf(CordRepBtree::kMaxCapacity - 1);
    refs.RefIf(second_shared(), right);
    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
                                    : CordRepBtree::Prepend(right, left);
    EXPECT_THAT(tree, IsNode(1));
    EXPECT_THAT(tree->Edges(), ElementsAre(left, right));
    CordRepBtree::Unref(tree);
  }
}
// Merging two equal-height trees absorbs the right tree's edges into the
// left root (3 + 2 leaves -> one node with 5 edges), preserving leaf order.
TEST_P(CordRepBtreeDualTest, MergeEqualHeightTrees) {
  for (bool use_append : {false, true}) {
    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
    AutoUnref refs;
    std::vector<CordRep*> flats;
    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3);
    GetLeafEdges(left, flats);
    refs.RefIf(first_shared(), left);
    CordRepBtree* right = MakeTree(CordRepBtree::kMaxCapacity * 2);
    GetLeafEdges(right, flats);
    refs.RefIf(second_shared(), right);
    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
                                    : CordRepBtree::Prepend(right, left);
    EXPECT_THAT(tree, IsNode(1));
    EXPECT_THAT(tree->Edges(), SizeIs(5u));
    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
    CordRepBtree::Unref(tree);
  }
}
// Merging a small leaf into a taller tree folds the leaf's edges into the
// tree's boundary leaf when they fit (edge count stays at 3).
TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeNotExceedingLeafCapacity) {
  for (bool use_append : {false, true}) {
    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
    AutoUnref refs;
    std::vector<CordRep*> flats;
    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 2 + 2);
    GetLeafEdges(left, flats);
    refs.RefIf(first_shared(), left);
    CordRepBtree* right = MakeTree(3);
    GetLeafEdges(right, flats);
    refs.RefIf(second_shared(), right);
    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
                                    : CordRepBtree::Prepend(right, left);
    EXPECT_THAT(tree, IsNode(1));
    EXPECT_THAT(tree->Edges(), SizeIs(3u));
    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
    CordRepBtree::Unref(tree);
  }
}
// As above, but the boundary leaf cannot absorb the small tree's edges, so
// the merged tree gains an extra edge (4 total).
TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeExceedingLeafCapacity) {
  for (bool use_append : {false, true}) {
    SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
    AutoUnref refs;
    std::vector<CordRep*> flats;
    CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3 - 2);
    GetLeafEdges(left, flats);
    refs.RefIf(first_shared(), left);
    CordRepBtree* right = MakeTree(3);
    GetLeafEdges(right, flats);
    refs.RefIf(second_shared(), right);
    CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
                                    : CordRepBtree::Prepend(right, left);
    EXPECT_THAT(tree, IsNode(1));
    EXPECT_THAT(tree->Edges(), SizeIs(4u));
    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
    CordRepBtree::Unref(tree);
  }
}
// Adds an extra (test-tracked) reference to the first and last edge found
// `depth` levels below `tree`, making those nodes shared so that subsequent
// tree operations must take their copy-on-write paths.
void RefEdgesAt(size_t depth, AutoUnref& refs, CordRepBtree* tree) {
  absl::Span<CordRep* const> edges = tree->Edges();
  if (depth == 0) {
    refs.Ref(edges.front());
    refs.Ref(edges.back());
  } else {
    // Recurse down both outer edges until the requested depth is reached.
    assert(tree->height() > 0);
    RefEdgesAt(depth - 1, refs, edges.front()->btree());
    RefEdgesAt(depth - 1, refs, edges.back()->btree());
  }
}
// Randomized merge test: builds two trees of random height and leaf fill,
// occasionally adds extra references at a random depth to force shared-node
// (copy-on-write) paths, then verifies that Append preserves every leaf edge
// in order. The default-seeded minstd_rand keeps the run deterministic.
TEST(CordRepBtreeTest, MergeFuzzTest) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  std::minstd_rand rnd;
  std::uniform_int_distribution<int> coin_flip(0, 1);
  std::uniform_int_distribution<int> dice_throw(1, 6);
  auto random_leaf_count = [&]() {
    std::uniform_int_distribution<int> dist_height(0, 3);
    std::uniform_int_distribution<int> dist_leaf(0, max_cap - 1);
    const int height = dist_height(rnd);
    // Leaf count grows geometrically with the chosen height, plus a random
    // remainder so partially filled nodes are covered too.
    return (height ? pow(max_cap, height) : 0) + dist_leaf(rnd);
  };
  for (int i = 0; i < 10000; ++i) {
    AutoUnref refs;
    std::vector<CordRep*> flats;
    CordRepBtree* left = MakeTree(random_leaf_count(), coin_flip(rnd));
    GetLeafEdges(left, flats);
    // With 1-in-6 probability, share edges at a random depth of the tree.
    if (dice_throw(rnd) == 1) {
      std::uniform_int_distribution<size_t> dist(
          0, static_cast<size_t>(left->height()));
      RefEdgesAt(dist(rnd), refs, left);
    }
    CordRepBtree* right = MakeTree(random_leaf_count(), coin_flip(rnd));
    GetLeafEdges(right, flats);
    if (dice_throw(rnd) == 1) {
      std::uniform_int_distribution<size_t> dist(
          0, static_cast<size_t>(right->height()));
      RefEdgesAt(dist(rnd), refs, right);
    }
    CordRepBtree* tree = CordRepBtree::Append(left, right);
    EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
    CordRepBtree::Unref(tree);
  }
}
// RemoveSuffix over leaf and multi-level trees: removing the full length
// yields nullptr, removing zero bytes is a no-op, and for every other suffix
// length the remaining content must match; unshared trailing flats may be
// truncated in place while shared ones must be left untouched.
TEST_P(CordRepBtreeTest, RemoveSuffix) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  for (size_t cap : {max_cap - 1, max_cap * 2, max_cap * max_cap * 2}) {
    const std::string data = CreateRandomString(cap * 512);
    {
      AutoUnref refs;
      CordRepBtree* node = refs.RefIf(shared(), CreateTree(data, 512));
      // Removing the entire length consumes the tree.
      EXPECT_THAT(CordRepBtree::RemoveSuffix(node, data.length()), Eq(nullptr));
      node = refs.RefIf(shared(), CreateTree(data, 512));
      // Removing nothing returns the same tree.
      EXPECT_THAT(CordRepBtree::RemoveSuffix(node, 0), Eq(node));
      CordRep::Unref(node);
    }
    for (size_t n = 1; n < data.length(); ++n) {
      AutoUnref refs;
      auto flats = CreateFlatsFromString(data, 512);
      CordRepBtree* node = refs.RefIf(shared(), CreateTree(flats));
      CordRep* rep = refs.Add(CordRepBtree::RemoveSuffix(node, n));
      EXPECT_THAT(CordToString(rep), Eq(data.substr(0, data.length() - n)));
      auto is_flat = [](CordRep* rep) { return rep->tag >= FLAT; };
      std::vector<CordRep*> edges = CordCollectRepsIf(is_flat, rep);
      ASSERT_THAT(edges.size(), Le(flats.size()));
      CordRep* last_edge = edges.back();
      edges.pop_back();
      const size_t last_length = rep->length - edges.size() * 512;
      size_t index = 0;
      // All flats except the last must be reused unmodified and in order.
      for (CordRep* edge : edges) {
        ASSERT_THAT(edge, Eq(flats[index++]));
        ASSERT_THAT(edge->length, Eq(512u));
      }
      // Last remaining flat: when shared it must stay at its full length;
      // when privately owned it may be truncated in place to `last_length`.
      if (last_length >= 500) {
        EXPECT_THAT(last_edge, Eq(flats[index++]));
        if (shared()) {
          EXPECT_THAT(last_edge->length, Eq(512u));
        } else {
          EXPECT_TRUE(last_edge->refcount.IsOne());
          EXPECT_THAT(last_edge->length, Eq(last_length));
        }
      }
    }
  }
}
// Exhaustive SubTree check over a tree of 3-byte flats: every valid
// (offset, length) pair must yield exactly the corresponding substring of
// the source data. The leaf count is large enough for a multi-level tree.
TEST(CordRepBtreeTest, SubTree) {
  constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
  const size_t n = max_cap * max_cap * 2;
  const std::string data = CreateRandomString(n * 3);
  std::vector<CordRep*> flats;
  // Split `data` into 3-byte flats; the test keeps one reference to each.
  for (absl::string_view s = data; !s.empty(); s.remove_prefix(3)) {
    flats.push_back(MakeFlat(s.substr(0, 3)));
  }
  CordRepBtree* node = CordRepBtree::Create(CordRep::Ref(flats[0]));
  for (size_t i = 1; i < flats.size(); ++i) {
    node = CordRepBtree::Append(node, CordRep::Ref(flats[i]));
  }
  for (size_t offset = 0; offset < data.length(); ++offset) {
    for (size_t length = 1; length <= data.length() - offset; ++length) {
      CordRep* rep = node->SubTree(offset, length);
      EXPECT_THAT(CordToString(rep), Eq(data.substr(offset, length)));
      CordRep::Unref(rep);
    }
  }
  CordRepBtree::Unref(node);
  // Release the test's own references on the flats.
  for (CordRep* rep : flats) {
    CordRep::Unref(rep);
  }
}
// SubTree on a tree already containing a SUBSTRING edge must produce a
// substring referencing the original flat directly — offsets are combined
// rather than nesting substring-of-substring nodes.
TEST(CordRepBtreeTest, SubTreeOnExistingSubstring) {
  AutoUnref refs;
  std::string data = CreateRandomString(1000);
  CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
  CordRep* flat = MakeFlat(data);
  leaf = CordRepBtree::Append(leaf, flat);
  // Cut 10 bytes off the end: "abc" stays intact, `flat` becomes a
  // substring edge of (offset 0, length 990).
  CordRep* result = leaf->SubTree(0, 3 + 990);
  ASSERT_THAT(result->tag, Eq(BTREE));
  CordRep::Unref(leaf);
  leaf = result->btree();
  ASSERT_THAT(leaf->Edges(), ElementsAre(_, IsSubstring(0u, 990u)));
  EXPECT_THAT(leaf->Edges()[1]->substring()->child, Eq(flat));
  // A sub-range entirely inside the substring edge must collapse to a
  // single substring of the underlying flat with the combined offset.
  result = leaf->SubTree(3 + 5, 970);
  ASSERT_THAT(result, IsSubstring(5u, 970u));
  EXPECT_THAT(result->substring()->child, Eq(flat));
  CordRep::Unref(result);
  CordRep::Unref(leaf);
}
// Adds string data one 3-byte chunk at a time to a leaf, in both append and
// prepend direction, verifying copy-on-write semantics: a shared leaf must
// be replaced by a new node, a privately owned leaf may be updated in place.
TEST_P(CordRepBtreeTest, AddDataToLeaf) {
  const size_t n = CordRepBtree::kMaxCapacity;
  const std::string data = CreateRandomString(n * 3);
  for (bool append : {true, false}) {
    AutoUnref refs;
    DataConsumer consumer(data, append);
    SCOPED_TRACE(append ? "Append" : "Prepend");
    CordRepBtree* leaf = CordRepBtree::Create(MakeFlat(consumer.Next(3)));
    for (size_t i = 1; i < n; ++i) {
      // Optionally make the leaf shared before adding the next chunk.
      refs.RefIf(shared(), leaf);
      CordRepBtree* result = BtreeAdd(leaf, append, consumer.Next(3));
      // Shared input => new node; private input => same node reused.
      EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
      EXPECT_THAT(CordToString(result), Eq(consumer.Consumed()));
      leaf = result;
    }
    CordRep::Unref(leaf);
  }
}
// Appending string data to a height-1 tree must only touch the last leaf:
// the first leaf is reused as-is, and the last leaf is copied only when the
// tree is shared.
TEST_P(CordRepBtreeTest, AppendDataToTree) {
  AutoUnref refs;
  size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
  std::string data = CreateRandomString(n * 3);
  CordRepBtree* tree = refs.RefIf(shared(), CreateTree(data, 3));
  CordRepBtree* leaf0 = tree->Edges()[0]->btree();
  CordRepBtree* leaf1 = tree->Edges()[1]->btree();
  CordRepBtree* result = CordRepBtree::Append(tree, "123456789");
  EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
  // leaf0 is untouched either way; leaf1 is replaced only when shared.
  EXPECT_THAT(result->Edges(),
              ElementsAre(leaf0, Conditional(shared(), Ne(leaf1), Eq(leaf1))));
  EXPECT_THAT(CordToString(result), Eq(data + "123456789"));
  CordRep::Unref(result);
}
TEST_P(CordRepBtreeTest, PrependDataToTree) {
AutoUnref refs;
size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
std::string data = CreateRandomString(n * 3);
CordRepBtree* tree = refs.RefIf(shared(), CreateTreeReverse(data, 3));
CordRepBtree* leaf0 = tree->Edges()[0]->btree();
CordRepBtree* leaf1 = tree->Edges()[1]->btree();
CordRepBtree* result = CordRepBtree::Prepend(tree, "123456789");
EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
EXPECT_THAT(result->Edges(), | CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
assert(tree != nullptr);
assert(n <= tree->length);
const size_t len = tree->length;
if (ABSL_PREDICT_FALSE(n == 0)) {
return tree;
}
if (ABSL_PREDICT_FALSE(n >= len)) {
CordRepBtree::Unref(tree);
return nullptr;
}
size_t length = len - n;
int height = tree->height();
bool is_mutable = tree->refcount.IsOne();
Position pos = tree->IndexOfLength(length);
while (pos.index == tree->begin()) {
CordRep* edge = ExtractFront(tree);
is_mutable &= edge->refcount.IsOne();
if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
tree = edge->btree();
pos = tree->IndexOfLength(length);
}
CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length);
CordRep* edge = tree->Edge(pos.index);
length = pos.n;
while (length != edge->length) {
assert(tree->refcount.IsOne());
const bool edge_is_mutable = edge->refcount.IsOne();
if (height-- == 0) {
tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
return AssertValid(top);
}
if (!edge_is_mutable) {
tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge;
CordRep::Unref(edge);
return AssertValid(top);
}
tree = edge->btree();
pos = tree->IndexOfLength(length);
tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length);
edge = tree->Edge(pos.index);
length = pos.n;
}
return AssertValid(top);
} | TEST_P(CordRepBtreeTest, RemoveSuffix) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
for (size_t cap : {max_cap - 1, max_cap * 2, max_cap * max_cap * 2}) {
const std::string data = CreateRandomString(cap * 512);
{
AutoUnref refs;
CordRepBtree* node = refs.RefIf(shared(), CreateTree(data, 512));
EXPECT_THAT(CordRepBtree::RemoveSuffix(node, data.length()), Eq(nullptr));
node = refs.RefIf(shared(), CreateTree(data, 512));
EXPECT_THAT(CordRepBtree::RemoveSuffix(node, 0), Eq(node));
CordRep::Unref(node);
}
for (size_t n = 1; n < data.length(); ++n) {
AutoUnref refs;
auto flats = CreateFlatsFromString(data, 512);
CordRepBtree* node = refs.RefIf(shared(), CreateTree(flats));
CordRep* rep = refs.Add(CordRepBtree::RemoveSuffix(node, n));
EXPECT_THAT(CordToString(rep), Eq(data.substr(0, data.length() - n)));
auto is_flat = [](CordRep* rep) { return rep->tag >= FLAT; };
std::vector<CordRep*> edges = CordCollectRepsIf(is_flat, rep);
ASSERT_THAT(edges.size(), Le(flats.size()));
CordRep* last_edge = edges.back();
edges.pop_back();
const size_t last_length = rep->length - edges.size() * 512;
size_t index = 0;
for (CordRep* edge : edges) {
ASSERT_THAT(edge, Eq(flats[index++]));
ASSERT_THAT(edge->length, Eq(512u));
}
if (last_length >= 500) {
EXPECT_THAT(last_edge, Eq(flats[index++]));
if (shared()) {
EXPECT_THAT(last_edge->length, Eq(512u));
} else {
EXPECT_TRUE(last_edge->refcount.IsOne());
EXPECT_THAT(last_edge->length, Eq(last_length));
}
}
}
}
} |
#include "tensorflow/lite/core/signature_runner.h"
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace impl {
// Binds a runner to one signature of `subgraph` and caches the C-string
// input/output names up front. The cached pointers alias the map keys owned
// by `signature_def_`, which therefore must outlive this runner.
SignatureRunner::SignatureRunner(const internal::SignatureDef* signature_def,
                                 Subgraph* subgraph)
    : signature_def_(signature_def), subgraph_(subgraph) {
  // Reserve up front so each name vector allocates at most once.
  input_names_.reserve(signature_def_->inputs.size());
  for (const auto& it : signature_def_->inputs) {
    input_names_.push_back(it.first.c_str());
  }
  output_names_.reserve(signature_def_->outputs.size());
  for (const auto& it : signature_def_->outputs) {
    output_names_.push_back(it.first.c_str());
  }
}
// Returns a mutable handle to the input tensor registered under
// `input_name`, or nullptr (after reporting an error on the subgraph) when
// the signature defines no such input.
TfLiteTensor* SignatureRunner::input_tensor(const char* input_name) {
  const auto iter = signature_def_->inputs.find(input_name);
  if (iter != signature_def_->inputs.end()) {
    return subgraph_->tensor(iter->second);
  }
  subgraph_->ReportError("Input name %s was not found", input_name);
  return nullptr;
}
// Returns a read-only handle to the output tensor registered under
// `output_name`, or nullptr (after reporting an error on the subgraph) when
// the signature defines no such output.
const TfLiteTensor* SignatureRunner::output_tensor(
    const char* output_name) const {
  const auto iter = signature_def_->outputs.find(output_name);
  if (iter != signature_def_->outputs.end()) {
    return subgraph_->tensor(iter->second);
  }
  subgraph_->ReportError("Output name %s was not found", output_name);
  return nullptr;
}
// Resizes the signature input named `input_name` to `new_size`, delegating
// to the subgraph. Fails (after reporting) when the name is unknown.
TfLiteStatus SignatureRunner::ResizeInputTensor(
    const char* input_name, const std::vector<int>& new_size) {
  const auto iter = signature_def_->inputs.find(input_name);
  if (iter != signature_def_->inputs.end()) {
    return subgraph_->ResizeInputTensor(iter->second, new_size);
  }
  subgraph_->ReportError("Input name %s was not found", input_name);
  return kTfLiteError;
}
// Strict variant of ResizeInputTensor: delegates to the subgraph's strict
// resize for the signature input named `input_name`. Fails (after
// reporting) when the name is unknown.
TfLiteStatus SignatureRunner::ResizeInputTensorStrict(
    const char* input_name, const std::vector<int>& new_size) {
  const auto iter = signature_def_->inputs.find(input_name);
  if (iter != signature_def_->inputs.end()) {
    return subgraph_->ResizeInputTensorStrict(iter->second, new_size);
  }
  subgraph_->ReportError("Input name %s was not found", input_name);
  return kTfLiteError;
}
// Runs the subgraph bound to this signature.
TfLiteStatus SignatureRunner::Invoke() {
  // Set the continue-invocation flag before running — presumably this
  // re-arms cancellation state from a prior Cancel(); confirm against
  // Subgraph's cancellation contract.
  if (subgraph_->continue_invocation_)
    (void)subgraph_->continue_invocation_->test_and_set();
  TF_LITE_ENSURE_STATUS(subgraph_->Invoke());
  if (!allow_buffer_handle_output_) {
    // Ensure all outputs are readable from CPU memory before returning.
    for (int tensor_index : subgraph_->outputs()) {
      TF_LITE_ENSURE_STATUS(
          subgraph_->EnsureTensorDataIsReadable(tensor_index));
    }
  }
  return kTfLiteOk;
}
// Installs a caller-provided custom allocation for the signature input named
// `input_name`. Fails (after reporting) when the name is unknown.
TfLiteStatus SignatureRunner::SetCustomAllocationForInputTensor(
    const char* input_name, const TfLiteCustomAllocation& allocation,
    int64_t flags) {
  const auto iter = signature_def_->inputs.find(input_name);
  if (iter != signature_def_->inputs.end()) {
    return subgraph_->SetCustomAllocationForTensor(iter->second, allocation,
                                                   flags);
  }
  subgraph_->ReportError("Input name %s was not found", input_name);
  return kTfLiteError;
}
// Installs a caller-provided custom allocation for the signature output
// named `output_name`. Fails (after reporting) when the name is unknown.
TfLiteStatus SignatureRunner::SetCustomAllocationForOutputTensor(
    const char* output_name, const TfLiteCustomAllocation& allocation,
    int64_t flags) {
  const auto iter = signature_def_->outputs.find(output_name);
  if (iter != signature_def_->outputs.end()) {
    return subgraph_->SetCustomAllocationForTensor(iter->second, allocation,
                                                   flags);
  }
  subgraph_->ReportError("Output name %s was not found", output_name);
  return kTfLiteError;
}
}
} | #include "tensorflow/lite/core/signature_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace impl {
namespace {
// End-to-end check of SignatureRunner against a model with two signatures
// ("add" and "sub"): key enumeration, runner lookup, name-based tensor
// access, resize/allocate, invocation, and unknown-name error paths.
TEST(SignatureRunnerTest, TestMultiSignatures) {
  TestErrorReporter reporter;
  auto model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/multi_signatures.bin", &reporter);
  ASSERT_TRUE(model);
  ops::builtin::BuiltinOpResolver resolver;
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> interpreter;
  ASSERT_EQ(builder(&interpreter), kTfLiteOk);
  ASSERT_NE(interpreter, nullptr);
  // The test model exposes exactly the "add" and "sub" signatures.
  std::vector<const std::string*> signature_defs =
      interpreter->signature_keys();
  ASSERT_EQ(signature_defs.size(), 2);
  ASSERT_EQ(*(signature_defs[0]), "add");
  ASSERT_EQ(*(signature_defs[1]), "sub");
  // Unknown signature keys yield no runner.
  ASSERT_EQ(interpreter->GetSignatureRunner("dummy"), nullptr);
  SignatureRunner* add_runner =
      interpreter->GetSignatureRunner(signature_defs[0]->c_str());
  ASSERT_NE(add_runner, nullptr);
  ASSERT_EQ(add_runner->signature_key(), "add");
  const std::vector<const char*>& input_names = add_runner->input_names();
  const std::vector<const char*>& output_names = add_runner->output_names();
  ASSERT_EQ(input_names.size(), 1);
  ASSERT_EQ(std::string(input_names[0]), "x");
  ASSERT_EQ(output_names.size(), 1);
  ASSERT_EQ(std::string(output_names[0]), "output_0");
  ASSERT_EQ(add_runner->ResizeInputTensor("x", {2}), kTfLiteOk);
  ASSERT_EQ(add_runner->AllocateTensors(), kTfLiteOk);
  TfLiteTensor* add_input = add_runner->input_tensor("x");
  // Unknown tensor names yield nullptr.
  ASSERT_EQ(add_runner->input_tensor("dummy"), nullptr);
  const TfLiteTensor* add_output = add_runner->output_tensor("output_0");
  ASSERT_EQ(add_runner->output_tensor("dummy"), nullptr);
  ASSERT_NE(add_input, nullptr);
  ASSERT_NE(add_output, nullptr);
  // "add" signature: the asserted outputs imply x + 2 elementwise.
  add_input->data.f[0] = 2;
  add_input->data.f[1] = 4;
  ASSERT_EQ(add_runner->Invoke(), kTfLiteOk);
  ASSERT_EQ(add_output->data.f[0], 4);
  ASSERT_EQ(add_output->data.f[1], 6);
  SignatureRunner* sub_runner = interpreter->GetSignatureRunner("sub");
  ASSERT_NE(sub_runner, nullptr);
  ASSERT_EQ(sub_runner->signature_key(), "sub");
  const std::vector<const char*>& input_names2 = sub_runner->input_names();
  const std::vector<const char*>& output_names2 = sub_runner->output_names();
  ASSERT_EQ(input_names2.size(), 1);
  ASSERT_EQ(std::string(input_names2[0]), "x");
  ASSERT_EQ(output_names2.size(), 1);
  ASSERT_EQ(std::string(output_names2[0]), "output_0");
  ASSERT_EQ(sub_runner->ResizeInputTensor("x", {3}), kTfLiteOk);
  ASSERT_EQ(sub_runner->AllocateTensors(), kTfLiteOk);
  TfLiteTensor* sub_input = sub_runner->input_tensor("x");
  const TfLiteTensor* sub_output = sub_runner->output_tensor("output_0");
  ASSERT_NE(sub_input, nullptr);
  ASSERT_NE(sub_output, nullptr);
  // "sub" signature: the asserted outputs imply x - 3 elementwise.
  sub_input->data.f[0] = 2;
  sub_input->data.f[1] = 4;
  sub_input->data.f[2] = 6;
  ASSERT_EQ(sub_runner->Invoke(), kTfLiteOk);
  ASSERT_EQ(sub_output->data.f[0], -1);
  ASSERT_EQ(sub_output->data.f[1], 1);
  ASSERT_EQ(sub_output->data.f[2], 3);
}
}
}
} | TfLiteStatus SignatureRunner::Invoke() {
if (subgraph_->continue_invocation_)
(void)subgraph_->continue_invocation_->test_and_set();
TF_LITE_ENSURE_STATUS(subgraph_->Invoke());
if (!allow_buffer_handle_output_) {
for (int tensor_index : subgraph_->outputs()) {
TF_LITE_ENSURE_STATUS(
subgraph_->EnsureTensorDataIsReadable(tensor_index));
}
}
return kTfLiteOk;
} | #include "tensorflow/lite/core/signature_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace impl {
namespace {
// NOTE(review): this test body is a verbatim duplicate of the
// TestMultiSignatures test defined earlier in this file — likely a
// copy/paste or data-merge artifact; confirm whether one copy should be
// removed. End-to-end check of SignatureRunner against a model with two
// signatures ("add" and "sub").
TEST(SignatureRunnerTest, TestMultiSignatures) {
  TestErrorReporter reporter;
  auto model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/multi_signatures.bin", &reporter);
  ASSERT_TRUE(model);
  ops::builtin::BuiltinOpResolver resolver;
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> interpreter;
  ASSERT_EQ(builder(&interpreter), kTfLiteOk);
  ASSERT_NE(interpreter, nullptr);
  // The test model exposes exactly the "add" and "sub" signatures.
  std::vector<const std::string*> signature_defs =
      interpreter->signature_keys();
  ASSERT_EQ(signature_defs.size(), 2);
  ASSERT_EQ(*(signature_defs[0]), "add");
  ASSERT_EQ(*(signature_defs[1]), "sub");
  ASSERT_EQ(interpreter->GetSignatureRunner("dummy"), nullptr);
  SignatureRunner* add_runner =
      interpreter->GetSignatureRunner(signature_defs[0]->c_str());
  ASSERT_NE(add_runner, nullptr);
  ASSERT_EQ(add_runner->signature_key(), "add");
  const std::vector<const char*>& input_names = add_runner->input_names();
  const std::vector<const char*>& output_names = add_runner->output_names();
  ASSERT_EQ(input_names.size(), 1);
  ASSERT_EQ(std::string(input_names[0]), "x");
  ASSERT_EQ(output_names.size(), 1);
  ASSERT_EQ(std::string(output_names[0]), "output_0");
  ASSERT_EQ(add_runner->ResizeInputTensor("x", {2}), kTfLiteOk);
  ASSERT_EQ(add_runner->AllocateTensors(), kTfLiteOk);
  TfLiteTensor* add_input = add_runner->input_tensor("x");
  // Unknown tensor names yield nullptr.
  ASSERT_EQ(add_runner->input_tensor("dummy"), nullptr);
  const TfLiteTensor* add_output = add_runner->output_tensor("output_0");
  ASSERT_EQ(add_runner->output_tensor("dummy"), nullptr);
  ASSERT_NE(add_input, nullptr);
  ASSERT_NE(add_output, nullptr);
  add_input->data.f[0] = 2;
  add_input->data.f[1] = 4;
  ASSERT_EQ(add_runner->Invoke(), kTfLiteOk);
  ASSERT_EQ(add_output->data.f[0], 4);
  ASSERT_EQ(add_output->data.f[1], 6);
  SignatureRunner* sub_runner = interpreter->GetSignatureRunner("sub");
  ASSERT_NE(sub_runner, nullptr);
  ASSERT_EQ(sub_runner->signature_key(), "sub");
  const std::vector<const char*>& input_names2 = sub_runner->input_names();
  const std::vector<const char*>& output_names2 = sub_runner->output_names();
  ASSERT_EQ(input_names2.size(), 1);
  ASSERT_EQ(std::string(input_names2[0]), "x");
  ASSERT_EQ(output_names2.size(), 1);
  ASSERT_EQ(std::string(output_names2[0]), "output_0");
  ASSERT_EQ(sub_runner->ResizeInputTensor("x", {3}), kTfLiteOk);
  ASSERT_EQ(sub_runner->AllocateTensors(), kTfLiteOk);
  TfLiteTensor* sub_input = sub_runner->input_tensor("x");
  const TfLiteTensor* sub_output = sub_runner->output_tensor("output_0");
  ASSERT_NE(sub_input, nullptr);
  ASSERT_NE(sub_output, nullptr);
  sub_input->data.f[0] = 2;
  sub_input->data.f[1] = 4;
  sub_input->data.f[2] = 6;
  ASSERT_EQ(sub_runner->Invoke(), kTfLiteOk);
  ASSERT_EQ(sub_output->data.f[0], -1);
  ASSERT_EQ(sub_output->data.f[1], 1);
  ASSERT_EQ(sub_output->data.f[2], 3);
}
#include "leveldb/write_batch.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
static const size_t kHeader = 12;
WriteBatch::WriteBatch() { Clear(); }
WriteBatch::~WriteBatch() = default;
WriteBatch::Handler::~Handler() = default;
// Resets to an empty batch: a zeroed 12-byte header (8-byte sequence number
// followed by a 4-byte record count) and no records.
void WriteBatch::Clear() {
  rep_.clear();
  rep_.resize(kHeader);
}
// The serialized representation's size doubles as the memory estimate.
size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
// Decodes the serialized record stream and replays each operation on
// `handler`. Returns a Corruption status when the encoding is malformed or
// when the number of records does not match the count in the header.
Status WriteBatch::Iterate(Handler* handler) const {
  Slice input(rep_);
  if (input.size() < kHeader) {
    return Status::Corruption("malformed WriteBatch (too small)");
  }
  // Skip the 8-byte sequence number and 4-byte count header.
  input.remove_prefix(kHeader);
  Slice key, value;
  int found = 0;
  while (!input.empty()) {
    found++;
    char tag = input[0];
    input.remove_prefix(1);
    switch (tag) {
      case kTypeValue:
        // Put record: tag, length-prefixed key, length-prefixed value.
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Put(key, value);
        } else {
          return Status::Corruption("bad WriteBatch Put");
        }
        break;
      case kTypeDeletion:
        // Delete record: tag, length-prefixed key.
        if (GetLengthPrefixedSlice(&input, &key)) {
          handler->Delete(key);
        } else {
          return Status::Corruption("bad WriteBatch Delete");
        }
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag");
    }
  }
  // Cross-check the replayed record count against the header.
  if (found != WriteBatchInternal::Count(this)) {
    return Status::Corruption("WriteBatch has wrong count");
  } else {
    return Status::OK();
  }
}
// Reads the record count from header bytes 8..11.
int WriteBatchInternal::Count(const WriteBatch* b) {
  return DecodeFixed32(b->rep_.data() + 8);
}
// Overwrites the record count in the header.
void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
  EncodeFixed32(&b->rep_[8], n);
}
// Reads the starting sequence number from header bytes 0..7.
SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
  return SequenceNumber(DecodeFixed64(b->rep_.data()));
}
// Overwrites the starting sequence number in the header.
void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
  EncodeFixed64(&b->rep_[0], seq);
}
// Appends a Put record (type tag + length-prefixed key and value) and bumps
// the record count in the header.
void WriteBatch::Put(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}
// Appends a Delete record (type tag + length-prefixed key) and bumps the
// record count in the header.
void WriteBatch::Delete(const Slice& key) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeDeletion));
  PutLengthPrefixedSlice(&rep_, key);
}
// Appends all records of `source` to this batch (the counts are summed).
void WriteBatch::Append(const WriteBatch& source) {
  WriteBatchInternal::Append(this, &source);
}
namespace {
// Handler that replays batch operations into a MemTable, assigning each
// record a consecutive sequence number starting at `sequence_`.
class MemTableInserter : public WriteBatch::Handler {
 public:
  SequenceNumber sequence_;  // next sequence number to assign
  MemTable* mem_;            // destination table (not owned)
  void Put(const Slice& key, const Slice& value) override {
    mem_->Add(sequence_, kTypeValue, key, value);
    sequence_++;
  }
  void Delete(const Slice& key) override {
    // Deletions store an empty value alongside the deletion tag.
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};
}
// Replays every operation of `b` into `memtable`, numbering records from the
// batch's stored starting sequence number.
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
  MemTableInserter inserter;
  inserter.sequence_ = WriteBatchInternal::Sequence(b);
  inserter.mem_ = memtable;
  return b->Iterate(&inserter);
}
// Replaces the batch's serialized representation wholesale; `contents` must
// include a valid 12-byte header.
void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
  assert(contents.size() >= kHeader);
  b->rep_.assign(contents.data(), contents.size());
}
// Concatenates the records of `src` onto `dst`, summing the record counts.
// The header of `src` is skipped; `dst` keeps its own sequence number.
void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
  SetCount(dst, Count(dst) + Count(src));
  assert(src->rep_.size() >= kHeader);
  dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}
} | #include "gtest/gtest.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/logging.h"
namespace leveldb {
// Replays `b` into a fresh MemTable and renders the contents as a compact
// string of "Put(k, v)@seq" / "Delete(k)@seq" entries in the memtable's
// iteration order. Appends "ParseError()" when the batch is malformed, or
// "CountMismatch()" when the replayed record count disagrees with the
// batch header.
static std::string PrintContents(WriteBatch* b) {
  InternalKeyComparator cmp(BytewiseComparator());
  MemTable* mem = new MemTable(cmp);
  mem->Ref();
  std::string state;
  Status s = WriteBatchInternal::InsertInto(b, mem);
  int count = 0;
  Iterator* iter = mem->NewIterator();
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey ikey;
    EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
    switch (ikey.type) {
      case kTypeValue:
        state.append("Put(");
        state.append(ikey.user_key.ToString());
        state.append(", ");
        state.append(iter->value().ToString());
        state.append(")");
        count++;
        break;
      case kTypeDeletion:
        state.append("Delete(");
        state.append(ikey.user_key.ToString());
        state.append(")");
        count++;
        break;
    }
    state.append("@");
    state.append(NumberToString(ikey.sequence));
  }
  delete iter;
  if (!s.ok()) {
    state.append("ParseError()");
  } else if (count != WriteBatchInternal::Count(b)) {
    state.append("CountMismatch()");
  }
  mem->Unref();
  return state;
}
// A freshly constructed batch has no records and prints nothing.
TEST(WriteBatchTest, Empty) {
  WriteBatch batch;
  ASSERT_EQ("", PrintContents(&batch));
  ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
}
// Records replay with consecutive sequence numbers starting at the batch's
// base sequence; the printed order follows the memtable's key ordering, not
// insertion order.
TEST(WriteBatchTest, Multiple) {
  WriteBatch batch;
  batch.Put(Slice("foo"), Slice("bar"));
  batch.Delete(Slice("box"));
  batch.Put(Slice("baz"), Slice("boo"));
  WriteBatchInternal::SetSequence(&batch, 100);
  ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
  ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
  ASSERT_EQ(
      "Put(baz, boo)@102"
      "Delete(box)@101"
      "Put(foo, bar)@100",
      PrintContents(&batch));
}
// Truncating the serialized contents by one byte must surface a parse error
// while still replaying the records that decoded cleanly before the cut.
TEST(WriteBatchTest, Corruption) {
  WriteBatch batch;
  batch.Put(Slice("foo"), Slice("bar"));
  batch.Delete(Slice("box"));
  WriteBatchInternal::SetSequence(&batch, 200);
  Slice contents = WriteBatchInternal::Contents(&batch);
  WriteBatchInternal::SetContents(&batch,
                                  Slice(contents.data(), contents.size() - 1));
  ASSERT_EQ(
      "Put(foo, bar)@200"
      "ParseError()",
      PrintContents(&batch));
}
// Append concatenates records and sums counts; the destination batch keeps
// its own base sequence number (the source's 300 is ignored).
TEST(WriteBatchTest, Append) {
  WriteBatch b1, b2;
  WriteBatchInternal::SetSequence(&b1, 200);
  WriteBatchInternal::SetSequence(&b2, 300);
  // Appending an empty batch is a no-op.
  b1.Append(b2);
  ASSERT_EQ("", PrintContents(&b1));
  b2.Put("a", "va");
  b1.Append(b2);
  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
  b2.Clear();
  b2.Put("b", "vb");
  b1.Append(b2);
  ASSERT_EQ(
      "Put(a, va)@200"
      "Put(b, vb)@201",
      PrintContents(&b1));
  b2.Delete("foo");
  b1.Append(b2);
  // The duplicate "b" key appears twice with distinct sequence numbers.
  ASSERT_EQ(
      "Put(a, va)@200"
      "Put(b, vb)@202"
      "Put(b, vb)@201"
      "Delete(foo)@203",
      PrintContents(&b1));
}
// ApproximateSize must grow monotonically as records are added (it tracks
// the serialized representation's size).
TEST(WriteBatchTest, ApproximateSize) {
  WriteBatch batch;
  size_t empty_size = batch.ApproximateSize();
  batch.Put(Slice("foo"), Slice("bar"));
  size_t one_key_size = batch.ApproximateSize();
  ASSERT_LT(empty_size, one_key_size);
  batch.Put(Slice("baz"), Slice("boo"));
  size_t two_keys_size = batch.ApproximateSize();
  ASSERT_LT(one_key_size, two_keys_size);
  batch.Delete(Slice("box"));
  size_t post_delete_size = batch.ApproximateSize();
  ASSERT_LT(two_keys_size, post_delete_size);
}
} | void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
EncodeFixed32(&b->rep_[8], n);
} | TEST(WriteBatchTest, Multiple) {
WriteBatch batch;
batch.Put(Slice("foo"), Slice("bar"));
batch.Delete(Slice("box"));
batch.Put(Slice("baz"), Slice("boo"));
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
ASSERT_EQ(
"Put(baz, boo)@102"
"Delete(box)@101"
"Put(foo, bar)@100",
PrintContents(&batch));
} |
#include "tsl/platform/status.h"
#include <stdio.h>
#include <deque>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
// Log sink that retains the most recent warning/error log messages so they
// can later be attached to aggregated statuses (see StatusGroup).
class StatusLogSink : public TFLogSink {
 public:
  static StatusLogSink* GetInstance() {
    static StatusLogSink* sink = new StatusLogSink();  // intentionally leaked
    return sink;
  }
  // Registers this sink exactly once. The retained message count defaults
  // to 5 and may be overridden via TF_WORKER_NUM_FORWARDED_LOG_MESSAGES; a
  // non-positive value leaves the sink unregistered (retention disabled).
  void enable() {
    absl::call_once(flag_, [this] {
      num_messages_ = 5;
      if (const char* num_msgs_str =
              getenv("TF_WORKER_NUM_FORWARDED_LOG_MESSAGES")) {
        if (!absl::SimpleAtoi(num_msgs_str, &num_messages_)) {
          LOG(WARNING) << "Failed to parse env variable "
                          "TF_WORKER_NUM_WARNING_ERROR_LOG_IN_STATUS="
                       << num_msgs_str << " as int. Using the default value "
                       << num_messages_ << ".";
        }
      }
      if (num_messages_ > 0) {
        TFAddLogSink(this);
      }
    });
  }
  // Copies the retained messages (oldest first) into `logs`.
  void GetMessages(std::vector<std::string>* logs) TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock lock(mu_);
    for (auto& msg : messages_) {
      logs->push_back(msg);
    }
  }
  // Receives each log record; keeps only WARNING and above, evicting the
  // oldest entry once the buffer exceeds num_messages_.
  void Send(const TFLogEntry& entry) override TF_LOCKS_EXCLUDED(mu_) {
    if (entry.log_severity() < absl::LogSeverity::kWarning) return;
    mutex_lock lock(mu_);
    messages_.emplace_back(entry.ToString());
    if (messages_.size() > static_cast<size_t>(num_messages_)) {
      messages_.pop_front();
    }
  }
 private:
  mutex mu_;
  absl::once_flag flag_;  // guards the one-time registration in enable()
  int num_messages_ = 0;  // retention capacity; stays 0 until enable() runs
  std::deque<std::string> messages_ TF_GUARDED_BY(mu_);
};
}
namespace errors {
static constexpr const char kStackTraceProtoUrl[] =
"type.googleapis.com/tensorflow.StackTracePayload";
// Serializes `stack_trace` into a payload on `status`. Each frame is
// encoded as three '\n'-separated fields (file name, line number, function
// name); embedded newlines in file and function names are stripped so the
// framing stays unambiguous. GetStackTrace() below is the inverse.
void SetStackTrace(absl::Status& status, std::vector<StackFrame> stack_trace) {
  std::vector<std::string> items;
  items.reserve(stack_trace.size());
  for (StackFrame& frame : stack_trace) {
    items.push_back(
        absl::StrCat(absl::StrReplaceAll(frame.file_name, {{"\n", ""}}), "\n",
                     frame.line_number, "\n",
                     absl::StrReplaceAll(frame.function_name, {{"\n", ""}})));
  }
  status.SetPayload(kStackTraceProtoUrl,
                    absl::Cord(absl::StrJoin(items, "\n")));
}
// Reconstructs the stack trace stored by SetStackTrace() from the status
// payload. Returns an empty vector when no payload is attached.
//
// The payload is a flat '\n'-joined list where each frame contributes three
// consecutive items: file name, line number, function name.
std::vector<StackFrame> GetStackTrace(const absl::Status& status) {
  std::vector<StackFrame> stack_trace;
  absl::optional<absl::Cord> maybe_serialized_payload =
      status.GetPayload(kStackTraceProtoUrl);
  if (maybe_serialized_payload.has_value()) {
    std::vector<std::string> split =
        absl::StrSplit(maybe_serialized_payload.value().Flatten(), '\n');
    assert(split.size() % 3 == 0);
    // Use size_t to match split.size() (avoids a signed/unsigned comparison)
    // and hoist the frame count out of the loop condition.
    const size_t num_frames = split.size() / 3;
    stack_trace.reserve(num_frames);
    for (size_t i = 0; i < num_frames; ++i) {
      const size_t idx = 3 * i;
      int line_number = -1;
      CHECK(absl::SimpleAtoi(split[idx + 1], &line_number));
      stack_trace.emplace_back(std::move(split[idx]), line_number,
                               std::move(split[idx + 2]));
    }
  }
  return stack_trace;
}
}
#ifdef _WIN32
// Windows-only helper: returns the status message as a NUL-terminated C
// string. The returned pointer aliases storage owned by `status`.
const char* NullTerminatedMessage(const absl::Status& status) {
  return absl::StatusMessageAsCStr(status);
}
#endif
// Builds the heap-allocated failure message used by the TF CHECK-status
// macros when the checked status `v` is not OK. The caller takes ownership
// of the returned string.
std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) {
  std::ostringstream oss;
  oss << "Non-OK-status: " << msg << "\nStatus: " << v;
  return new std::string(oss.str());
}
StatusGroup::StatusGroup() {}
// Seeds the group with an initial set of statuses, classifying each via
// Update().
StatusGroup::StatusGroup(std::initializer_list<absl::Status> statuses) {
  for (const absl::Status& s : statuses) {
    Update(s);
  }
}
static constexpr const char kDerivedStatusProtoUrl[] =
"type.googleapis.com/tensorflow.DerivedStatus";
// Marks `s` as a derived status by attaching an empty DerivedStatus payload.
// Derived statuses are de-prioritized when the group is summarized. Already
// derived statuses are returned unchanged.
absl::Status StatusGroup::MakeDerived(const absl::Status& s) {
  if (!IsDerived(s)) {
    absl::Status derived(s);
    derived.SetPayload(kDerivedStatusProtoUrl, absl::Cord(""));
    return derived;
  }
  return s;
}
// A status is "derived" iff it carries the DerivedStatus payload attached
// by MakeDerived().
bool StatusGroup::IsDerived(const absl::Status& s) {
  return s.GetPayload(kDerivedStatusProtoUrl).has_value();
}
// Starts forwarding recent warning/error log messages so they can later be
// attached to summary statuses (see AttachLogMessages).
void StatusGroup::ConfigureLogHistory() {
  StatusLogSink::GetInstance()->enable();
}
// Records `s` in the group: OK statuses are only counted, while errors mark
// the group as failed and are bucketed into derived vs. non-derived sets.
void StatusGroup::Update(const absl::Status& s) {
  if (s.ok()) {
    ++num_ok_;
    return;
  }
  ok_ = false;
  if (IsDerived(s)) {
    derived_.insert(s);
  } else {
    non_derived_.insert(s);
  }
}
static constexpr int kMaxAggregatedStatusMessageSize = 8 * 1024;
static constexpr int kMaxAttachedLogMessageSize = 512;
// Collects the union of all payloads attached to the group's statuses.
// Derived statuses are visited first, so on key collisions payloads from
// non-derived (root) statuses win. The internal DerivedStatus marker is
// stripped from the result.
std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
  std::unordered_map<std::string, absl::Cord> payloads;
  auto capture_payload = [&payloads](absl::string_view key,
                                     const absl::Cord& value) {
    payloads[std::string(key)] = value;
  };
  for (const auto& status : derived_) {
    status.ForEachPayload(capture_payload);
  }
  for (const auto& status : non_derived_) {
    status.ForEachPayload(capture_payload);
  }
  payloads.erase(kDerivedStatusProtoUrl);
  return payloads;
}
// Constructs a status with the given code and message and copies every
// entry of `payloads` onto it.
absl::Status MakeStatus(
    absl::StatusCode code, absl::string_view message,
    const std::unordered_map<std::string, absl::Cord>& payloads) {
  absl::Status result(code, message);
  for (const auto& [url, cord] : payloads) {
    result.SetPayload(url, cord);
  }
  return result;
}
// Renders a status as "CODE_NAME: message" for aggregated error output.
std::string MakeString(const absl::Status& status) {
  const std::string code_name = absl::StatusCodeToString(status.code());
  return absl::StrCat(code_name, ": ", status.message());
}
// Renders the group as one representative status:
//  - OK when no errors were recorded;
//  - the single non-derived error (with recent logs appended) when there is
//    exactly one;
//  - an aggregated multi-error message otherwise;
//  - the first derived error, still marked derived, when only derived
//    errors exist.
absl::Status StatusGroup::as_summary_status() const {
  if (ok_) {
    return absl::OkStatus();
  }
  // Appends any recently forwarded warning/error log lines (populated by
  // AttachLogMessages); each line is capped at kMaxAttachedLogMessageSize.
  auto get_recent_logs = [this]() -> std::string {
    if (!recent_logs_.empty()) {
      std::vector<std::string> fmt;
      fmt.push_back("\nRecent warning and error logs:");
      for (auto& log : recent_logs_) {
        fmt.push_back(" " + log.substr(0, kMaxAttachedLogMessageSize));
      }
      return absl::StrJoin(fmt, "\n");
    } else {
      return "";
    }
  };
  // Exactly one root error: keep its code and message.
  if (non_derived_.size() == 1) {
    return MakeStatus(
        non_derived_.begin()->code(),
        strings::StrCat(non_derived_.begin()->message(), get_recent_logs()),
        GetPayloads());
  }
  if (!non_derived_.empty()) {
    std::vector<std::string> fmt;
    fmt.push_back(
        strings::Printf("%zu root error(s) found.", non_derived_.size()));
    int index = 0;
    // Choose the first non-CANCELLED code as the summary code; CANCELLED is
    // used only when every root error is cancelled.
    auto code = absl::StatusCode::kCancelled;
    for (const auto& s : non_derived_) {
      if (code == absl::StatusCode::kCancelled &&
          s.code() != absl::StatusCode::kCancelled) {
        code = s.code();
      }
      fmt.emplace_back(strings::StrCat(" (", index, ") ", MakeString(s)));
      ++index;
    }
    fmt.push_back(strings::Printf("%zu successful operations.", num_ok_));
    fmt.push_back(
        strings::Printf("%zu derived errors ignored.", derived_.size()));
    // Cap the aggregated message so very large groups stay bounded.
    std::string error_msg =
        absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize);
    return MakeStatus(code, strings::StrCat(error_msg, get_recent_logs()),
                      GetPayloads());
  } else {
    // Only derived errors remain: surface the first, still marked derived.
    return MakeDerived(MakeStatus(derived_.begin()->code(),
                                  derived_.begin()->message(), GetPayloads()));
  }
}
absl::Status StatusGroup::as_concatenated_status() const {
if (ok_) {
return absl::OkStatus();
}
if (non_derived_.size() == 1) {
return MakeStatus(non_derived_.begin()->code(),
non_derived_.begin()->message(), GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<string> fmt;
fmt.emplace_back("\n=====================");
for (const auto& s : non_derived_) {
fmt.emplace_back(MakeString(s));
}
fmt.emplace_back("=====================\n");
return MakeStatus(
non_derived_.begin()->code(),
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
void StatusGroup::AttachLogMessages() {
recent_logs_.clear();
StatusLogSink::GetInstance()->GetMessages(&recent_logs_);
}
} | #include "tsl/platform/status.h"
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
#include "tsl/protobuf/status.pb.h"
namespace tsl {
namespace {
using ::testing::IsEmpty;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(ToStringTest, PayloadsArePrinted) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(),
"ABORTED: Aborted Error Message [payload_key='payload_value "
"\\x01\\x02\\x03']");
}
TEST(ToStringTest, MatchesAbslStatus) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
absl::Status absl_status =
absl::Status(absl::StatusCode::kAborted, status.message());
absl_status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(), absl_status.ToString());
}
TEST(StackTrace, SerializeAndDeserializeCorrectly) {
absl::Status status = errors::Aborted("Aborted Error Message");
std::vector<StackFrame> stack_trace;
stack_trace.push_back(StackFrame("filename_1", 33, "func_name_1"));
stack_trace.push_back(StackFrame("filename_2", 66, "func_name_2"));
errors::SetStackTrace(status, stack_trace);
std::vector<StackFrame> deserialized = errors::GetStackTrace(status);
EXPECT_EQ(stack_trace.size(), deserialized.size());
for (size_t i = 0; i < stack_trace.size(); ++i) {
EXPECT_EQ(stack_trace[i], deserialized[i]);
}
}
TEST(StatusGroupTest, DeterministicOrderWithoutPayloads) {
absl::Status status_a = errors::Aborted("Status A");
absl::Status status_b = errors::Aborted("Status B");
absl::Status status_c = errors::Aborted("Status C");
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
EXPECT_EQ(combined,
StatusGroup({status_a, status_b, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_a, status_c, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_a, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_c, status_a}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_a, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_b, status_a}).as_summary_status());
}
TEST(StatusGroupTest, DeterministicOrderWithPayloads) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key", absl::Cord("payload_value_a"));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key", absl::Cord("payload_value_b"));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key", absl::Cord("payload_value_c"));
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
ASSERT_TRUE(combined.GetPayload("payload_key").has_value());
std::string payload(combined.GetPayload("payload_key").value());
EXPECT_EQ(payload, StatusGroup({status_a, status_b, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_a, status_c, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_a, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_c, status_a})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_a, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_b, status_a})
.as_summary_status()
.GetPayload("payload_key"));
}
TEST(StatusGroupTest, PayloadsMergedProperly) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key_a",
absl::Cord(std::string("payload_value_a")));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key_b",
absl::Cord(std::string("payload_value_b")));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key_c",
absl::Cord(std::string("payload_value_c")));
absl::Status derived_status_c =
StatusGroup::MakeDerived(errors::Aborted("Status C"));
derived_status_c.SetPayload(
"payload_key_c", absl::Cord(std::string("derived_payload_value_c")));
StatusGroup status_group({status_a, status_b, status_c, derived_status_c});
EXPECT_THAT(status_group.GetPayloads(), ::testing::SizeIs(3));
absl::Status combined = status_group.as_summary_status();
EXPECT_EQ(combined.GetPayload("payload_key_a"), "payload_value_a");
EXPECT_EQ(combined.GetPayload("payload_key_b"), "payload_value_b");
EXPECT_EQ(combined.GetPayload("payload_key_c"), "payload_value_c");
}
TEST(Status, ErrorStatusForEachPayloadIteratesOverAll) {
absl::Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](StringPiece key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 3);
EXPECT_EQ(payloads["key1"], "value1");
EXPECT_EQ(payloads["key2"], "value2");
EXPECT_EQ(payloads["key3"], "value3");
}
TEST(Status, OkStatusForEachPayloadNoIteration) {
absl::Status s = absl::OkStatus();
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](StringPiece key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 0);
}
TEST(Status, SaveOKStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::OkStatus());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, SaveErrorStatusToProto) {
tensorflow::StatusProto status_proto =
StatusToProto(errors::NotFound("Not found"));
EXPECT_EQ(status_proto.code(), error::NOT_FOUND);
EXPECT_EQ(status_proto.message(), "Not found");
}
TEST(Status, SaveEmptyStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::Status());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, MakeOKStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::OK);
EXPECT_THAT(StatusFromProto(status_proto), IsOk());
}
TEST(Status, MakeErrorStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::INVALID_ARGUMENT);
status_proto.set_message("Invalid argument");
EXPECT_THAT(StatusFromProto(status_proto),
StatusIs(error::INVALID_ARGUMENT, "Invalid argument"));
}
TEST(Status, MakeStatusFromEmptyProto) {
EXPECT_THAT(StatusFromProto(tensorflow::StatusProto()), IsOk());
}
}
} | std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
std::unordered_map<std::string, absl::Cord> payloads;
auto capture_payload = [&payloads](absl::string_view key,
const absl::Cord& value) {
payloads[std::string(key)] = value;
};
for (const auto& status : derived_) {
status.ForEachPayload(capture_payload);
}
for (const auto& status : non_derived_) {
status.ForEachPayload(capture_payload);
}
payloads.erase(kDerivedStatusProtoUrl);
return payloads;
} | TEST(StatusGroupTest, PayloadsMergedProperly) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key_a",
absl::Cord(std::string("payload_value_a")));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key_b",
absl::Cord(std::string("payload_value_b")));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key_c",
absl::Cord(std::string("payload_value_c")));
absl::Status derived_status_c =
StatusGroup::MakeDerived(errors::Aborted("Status C"));
derived_status_c.SetPayload(
"payload_key_c", absl::Cord(std::string("derived_payload_value_c")));
StatusGroup status_group({status_a, status_b, status_c, derived_status_c});
EXPECT_THAT(status_group.GetPayloads(), ::testing::SizeIs(3));
absl::Status combined = status_group.as_summary_status();
EXPECT_EQ(combined.GetPayload("payload_key_a"), "payload_value_a");
EXPECT_EQ(combined.GetPayload("payload_key_b"), "payload_value_b");
EXPECT_EQ(combined.GetPayload("payload_key_c"), "payload_value_c");
} |
#include "tensorflow/lite/delegates/gpu/common/gpu_model.h"
#include <algorithm>
#include <any>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/operation_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/special_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/task/serialization_base.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/add_bias.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
namespace tflite {
namespace gpu {
namespace {
// Returns true iff every input tensor id of `node` is already present in
// `ready_tensors`, i.e. the node can be executed/merged at this point.
bool IsReady(const absl::flat_hash_set<ValueId>& ready_tensors,
             const GpuNode& node) {
  return std::all_of(
      node.inputs.begin(), node.inputs.end(),
      [&ready_tensors](ValueId input_id) {
        return ready_tensors.find(input_id) != ready_tensors.end();
      });
}
// Fuses `src` into `dst`: all inputs of `src` except the first (which is the
// tensor produced by `dst`) are appended to `dst`, `dst` now produces `src`'s
// output, the names are concatenated, and the GPU operations are combined.
absl::Status MergeGpuNodes(const GpuInfo& gpu_info, GpuNode* src,
                           GpuNode* dst) {
  if (!src->inputs.empty()) {
    // Input 0 of `src` is dst's output, so it is intentionally dropped.
    dst->inputs.insert(dst->inputs.end(), src->inputs.begin() + 1,
                       src->inputs.end());
  }
  dst->outputs[0] = src->outputs[0];
  dst->name += " -> " + src->name;
  return dst->gpu_operation->AddOperation(gpu_info, src->gpu_operation.get());
}
// Serializes a tensor descriptor paired with its value id into the
// flatbuffer under construction and returns the table offset.
flatbuffers::Offset<data::TensorDescWithId> Encode(
    const TensorDescriptor& desc, const ValueId& id,
    flatbuffers::FlatBufferBuilder* builder) {
  // Nested tables must be encoded before the enclosing table builder starts.
  const auto encoded_desc = Encode(desc, builder);
  data::TensorDescWithIdBuilder with_id_builder(*builder);
  with_id_builder.add_desc(encoded_desc);
  with_id_builder.add_id(id);
  return with_id_builder.Finish();
}
// Serializes a GpuNode (operation, input/output tensor ids and name) into the
// flatbuffer under construction and returns the table offset.
flatbuffers::Offset<data::GpuNode> Encode(
    const GpuNode& node, flatbuffers::FlatBufferBuilder* builder) {
  auto op_fb = Encode(*node.gpu_operation, builder);
  // ValueId -> int32_t conversion happens element-wise in the range ctors.
  const std::vector<int32_t> in_ids(node.inputs.begin(), node.inputs.end());
  const std::vector<int32_t> out_ids(node.outputs.begin(), node.outputs.end());
  // Vectors and strings must be created before the table builder starts.
  auto in_ids_fb = builder->CreateVector(in_ids);
  auto out_ids_fb = builder->CreateVector(out_ids);
  auto name_fb = builder->CreateString(node.name);
  data::GpuNodeBuilder node_builder(*builder);
  node_builder.add_gpu_op(op_fb);
  node_builder.add_input_ids(in_ids_fb);
  node_builder.add_output_ids(out_ids_fb);
  node_builder.add_name(name_fb);
  return node_builder.Finish();
}
// Deserializes a GpuNode from its flatbuffer representation, filling in the
// operation, the input/output tensor ids and the node name.
absl::Status Decode(const data::GpuNode* fb_node, GpuNode* node) {
  GPUOperation decoded_op;
  RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &decoded_op));
  node->gpu_operation = std::make_unique<GPUOperation>(std::move(decoded_op));
  node->inputs.insert(node->inputs.end(), fb_node->input_ids()->begin(),
                      fb_node->input_ids()->end());
  node->outputs.insert(node->outputs.end(), fb_node->output_ids()->begin(),
                       fb_node->output_ids()->end());
  node->name.assign(fb_node->name()->c_str(), fb_node->name()->size());
  return absl::OkStatus();
}
// Returns true when `node` is a multi-input ADD or MUL whose inputs do not
// broadcast against the output shape on any of the B/H/W/C axes, i.e. the
// inputs can be freely reordered for linking purposes.
bool IsAssociativeLinkableOp(const Node& node,
                             const std::vector<Value*>& inputs,
                             const std::vector<Value*>& outputs) {
  if (inputs.size() == 1) {
    return false;
  }
  const OperationType op_type = OperationTypeFromString(node.operation.type);
  const bool associative =
      op_type == OperationType::ADD || op_type == OperationType::MUL;
  if (!associative) {
    return false;
  }
  const auto dst_shape = outputs[0]->tensor.shape;
  for (const Value* input : inputs) {
    const auto src_shape = input->tensor.shape;
    // A source axis of size 1 that differs from the destination axis means
    // this input is broadcast — reordering would not be safe.
    const bool broadcast_b = dst_shape.b != src_shape.b && src_shape.b == 1;
    const bool broadcast_h = dst_shape.h != src_shape.h && src_shape.h == 1;
    const bool broadcast_w = dst_shape.w != src_shape.w && src_shape.w == 1;
    const bool broadcast_c = dst_shape.c != src_shape.c && src_shape.c == 1;
    if (broadcast_b || broadcast_h || broadcast_w || broadcast_c) {
      return false;
    }
  }
  return true;
}
// Validates that a predefined/external tensor descriptor is compatible with
// the requested data type and shape, and that the current device can actually
// allocate a tensor with that descriptor.
absl::Status CheckExternalTensorDescription(const GpuInfo& gpu_info,
                                            const TensorDescriptor& tensor_desc,
                                            const BHWC& shape,
                                            DataType data_type) {
  if (tensor_desc.GetDataType() != data_type) {
    return absl::InvalidArgumentError(
        "Global precision and precision of predefined/external tensors must be "
        "synchronized.");
  }
  if (tensor_desc.HasAxis(Axis::DEPTH)) {
    return absl::InvalidArgumentError(
        "Currently no support of Depth dimension in predefined/external "
        "tensors.");
  }
  // The descriptor must have a BATCH axis exactly when the shape is batched.
  const bool shape_is_batched = shape.b != 1;
  if (tensor_desc.HasAxis(Axis::BATCH) != shape_is_batched) {
    return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
  }
  if (!tensor_desc.CanCreateTensorWithShape(gpu_info, shape).ok()) {
    return absl::UnavailableError(
        "Current device can not allocate tensor with this shape for "
        "predefined/external descriptor.");
  }
  return absl::OkStatus();
}
// Bookkeeping for tensor-id -> TensorDescriptor reservations while building a
// GpuModel. Ids are handed out monotonically from `next_`.
class TensorReserver {
 public:
  TensorReserver() : next_(0) {}
  // Reserves a fresh id for `dummy` and returns it.
  ValueId Add(const TensorDescriptor& dummy) {
    reservations_[next_] = dummy;
    return next_++;
  }
  // Registers `dummy` under an externally chosen id (does not advance next_).
  void Add(ValueId id, const TensorDescriptor& dummy) {
    reservations_[id] = dummy;
  }
  // Returns a fresh, unreserved id without registering a descriptor for it.
  ValueId GetNewId() { return next_++; }
  // Sets the next id to hand out; callers use this after bulk-adding ids.
  void SetNext(ValueId id) { next_ = id; }
  // Returns a copy of the descriptor for `id`. NOTE: uses operator[], so an
  // unknown id silently default-inserts an empty descriptor.
  TensorDescriptor Get(ValueId id) { return reservations_[id]; }
 public:
  // Intentionally public: GraphToGpuModel moves reservations_ straight into
  // GpuModel::tensors.
  absl::flat_hash_map<ValueId, TensorDescriptor> reservations_;
  ValueId next_;
};
// Chooses a TensorDescriptor for every value in `graph` and registers it in
// `tensor_reserver` under the value's id. Predefined / external tensors keep
// the descriptor supplied by `create_info`; all other tensors get one derived
// from the requested precision and storage type. Fails if a tensor id falls
// in more than one external category, or if an external tensor is not a graph
// input/output, or if an external descriptor is invalid for the device.
absl::Status ReserveGraphTensors(const CreateGpuModelInfo& create_info,
                                 const GpuInfo& gpu_info,
                                 const GraphFloat32& graph,
                                 TensorReserver* tensor_reserver) {
  ValueId max_id = 0;
  auto tensors = graph.values();
  for (auto& t : tensors) {
    // Float tensors follow the global precision; non-float tensors (e.g.
    // integer types) keep their own data type.
    auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
    if (t->tensor.type != DataType::FLOAT32 &&
        t->tensor.type != DataType::FLOAT16) {
      data_type = t->tensor.type;
    }
    const auto shape = graph.GetValue(t->id)->tensor.shape;
    auto it_predefined = create_info.predefined.find(t->id);
    auto it_immutable_external =
        create_info.external_immutable_tensors.find(t->id);
    auto it_mutable_external = create_info.external_mutable_tensors.find(t->id);
    // Count how many external categories claim this id; at most one may.
    int external_categories_count = 0;
    TensorDescriptor tensor_desc;
    if (it_predefined != create_info.predefined.end()) {
      external_categories_count++;
      tensor_desc = it_predefined->second;
    }
    if (it_immutable_external != create_info.external_immutable_tensors.end()) {
      external_categories_count++;
      tensor_desc = it_immutable_external->second->GetDescriptor();
    }
    if (it_mutable_external != create_info.external_mutable_tensors.end()) {
      external_categories_count++;
      tensor_desc = it_mutable_external->second;
    }
    if (external_categories_count > 1) {
      return absl::InvalidArgumentError(
          "Tensors ids from predefined / external_immutable_tensors / "
          "external_mutable_tensors should not intersect.");
    }
    if (external_categories_count == 1) {
      // External tensors are only supported at the graph boundary.
      if (!(graph.IsGraphInput(t->id) || graph.IsGraphOutput(t->id))) {
        return absl::InvalidArgumentError(
            "Currently external can be used only for graph inputs/outputs");
      }
      RETURN_IF_ERROR(CheckExternalTensorDescription(gpu_info, tensor_desc,
                                                     shape, data_type));
    } else {
      // Internal tensor: derive the descriptor from create_info, upgrading to
      // SINGLE_TEXTURE_2D when the channel count is small and the device can
      // allocate it.
      TensorStorageType storage_type = create_info.storage_type;
      Layout layout = shape.b == 1 ? Layout::HWC : Layout::BHWC;
      const bool can_use_single_texture =
          storage_type == TensorStorageType::TEXTURE_2D ||
          storage_type == TensorStorageType::TEXTURE_3D ||
          storage_type == TensorStorageType::TEXTURE_ARRAY;
      if (shape.c < 4 && can_use_single_texture &&
          TensorDescriptor{data_type, TensorStorageType::SINGLE_TEXTURE_2D,
                           layout}
              .CanCreateTensorWithShape(gpu_info, shape)
              .ok()) {
        storage_type = TensorStorageType::SINGLE_TEXTURE_2D;
      }
      tensor_desc = TensorDescriptor{data_type, storage_type, layout};
      RETURN_IF_ERROR(
          tensor_desc.UpdateToSupportedStorageType(gpu_info, shape));
      // Metal-specific tweak for TEXTURE_2D write paths on non-Apple1 GPUs.
      if (gpu_info.IsApiMetal() &&
          storage_type == TensorStorageType::TEXTURE_2D) {
        if (!(gpu_info.IsApple() && gpu_info.apple_info.IsFamilyApple1())) {
          tensor_desc.SetUseBufferForWriteOnlyTexture2d(true);
        }
      }
    }
    tensor_desc.SetBHWCShape(shape);
    tensor_reserver->Add(t->id, tensor_desc);
    max_id = std::max(max_id, t->id);
  }
  // Fresh ids handed out later must not collide with existing graph ids.
  tensor_reserver->SetNext(max_id + 1);
  return absl::OkStatus();
}
// Lowers every node of `graph` into one or more GpuNodes appended to
// `gpu_model->nodes`. CONSTANT nodes become const tensors; other nodes first
// try the special multi-node subgraph selector, then the generic per-node
// selector. New tensors created by a subgraph get global ids from
// `tensor_reserver`, and negative local ids inside a subgraph are remapped to
// those global ids.
absl::Status ConvertOperations(const GpuInfo& gpu_info,
                               const GraphFloat32& graph,
                               const CreateGpuModelInfo& create_info,
                               TensorReserver* tensor_reserver,
                               GpuModel* gpu_model) {
  std::map<ValueId, TensorDescriptor> tensor_descriptors;
  const auto values = graph.values();
  for (auto value : values) {
    tensor_descriptors[value->id] = tensor_reserver->Get(value->id);
  }
  std::set<NodeId> consumed_nodes;
  std::vector<Node*> graph_nodes = graph.nodes();
  // Maps a tensor id to the index of the node that last wrote it; graph
  // inputs are marked with -1.
  std::map<ValueId, int>
      tensor_usages;
  for (const auto& input : gpu_model->input_ids_and_refs) {
    tensor_usages[input.first] = -1;
  }
  // Conv weight sharing is only tracked when the hint requests it.
  std::vector<SharedWeightsConvDesc> shared_conv_weights;
  std::vector<SharedWeightsConvDesc>* shared_conv_weights_ptr =
      create_info.hints.Check(ModelHints::kReuseConvWeights)
          ? &shared_conv_weights
          : nullptr;
  for (int i = 0; i < graph_nodes.size(); ++i) {
    const Node& node = *graph_nodes[i];
    // Nodes already absorbed by an earlier special subgraph are skipped.
    if (consumed_nodes.find(node.id) != consumed_nodes.end()) {
      continue;
    }
    auto op_type = OperationTypeFromString(node.operation.type);
    if (op_type == OperationType::CONSTANT) {
      // Constants become uploaded const tensors, not GPU operations.
      auto attr =
          std::any_cast<ConstTensorAttributes>(node.operation.attributes);
      auto outputs = graph.FindOutputs(node.id);
      gpu_model->const_tensors[outputs[0]->id] =
          tensor_reserver->Get(outputs[0]->id);
      gpu_model->const_tensors[outputs[0]->id].UploadData(attr.tensor);
      continue;
    }
    GPUOperationsSubgraph gpu_subgraph;
    if (GPUSubgraphFromGraph(create_info.hints, gpu_info, create_info.precision,
                             graph, node.id, tensor_descriptors,
                             &consumed_nodes, &gpu_subgraph)
            .ok()) {
      // Special multi-node fusion matched; gpu_subgraph is already filled.
    } else {
      // Fall back to generic single-node conversion.
      auto inputs = graph.FindInputs(node.id);
      auto outputs = graph.FindOutputs(node.id);
      if (IsAssociativeLinkableOp(node, inputs, outputs)) {
        // Put the most recently produced input first to maximize linking.
        int latest_written_tensor_index = 0;
        int last_usage = tensor_usages[inputs[0]->id];
        for (int j = 1; j < inputs.size(); ++j) {
          if (tensor_usages[inputs[j]->id] > last_usage) {
            last_usage = tensor_usages[inputs[j]->id];
            latest_written_tensor_index = j;
          }
        }
        std::swap(inputs[0], inputs[latest_written_tensor_index]);
      }
      consumed_nodes.insert(node.id);
      OperationDef op_def;
      op_def.precision = create_info.precision;
      for (int j = 0; j < inputs.size(); ++j) {
        op_def.src_tensors.push_back(tensor_reserver->Get(inputs[j]->id));
      }
      for (int j = 0; j < outputs.size(); ++j) {
        op_def.dst_tensors.push_back(tensor_reserver->Get(outputs[j]->id));
      }
      RETURN_IF_ERROR(GPUOperationFromNode(
          gpu_info, op_def, create_info.hints, inputs, outputs, node,
          shared_conv_weights_ptr, &gpu_subgraph));
    }
    // Assign global ids to subgraph-local tensors. Tensors that carry data
    // become const tensors; the rest get fresh descriptor reservations.
    absl::flat_hash_map<int, ValueId> mapping_to_global_ids;
    for (int j = 0; j < gpu_subgraph.new_tensors.size(); ++j) {
      const auto& t = gpu_subgraph.new_tensors[j];
      if (!t.GetData().empty()) {
        auto global_id = tensor_reserver->GetNewId();
        gpu_model->const_tensors[global_id] =
            std::move(gpu_subgraph.new_tensors[j]);
        mapping_to_global_ids[j] = global_id;
      } else {
        auto global_id = tensor_reserver->Add(t);
        mapping_to_global_ids[j] = global_id;
      }
    }
    if (!shared_conv_weights.empty() && !mapping_to_global_ids.empty()) {
      shared_conv_weights.back().RemapIds(mapping_to_global_ids);
    }
    for (auto& gpu_op : gpu_subgraph.operations) {
      GpuNode gpu_node;
      gpu_node.gpu_operation = std::move(gpu_op.operation);
      gpu_node.inputs.resize(gpu_op.input_ids.size());
      for (int j = 0; j < gpu_op.input_ids.size(); ++j) {
        int id = gpu_op.input_ids[j];
        if (id >= 0) {
          gpu_node.inputs[j] = id;
        } else {
          // Negative ids are subgraph-local: -(id + 1) indexes new_tensors.
          gpu_node.inputs[j] = mapping_to_global_ids[-(id + 1)];
        }
      }
      gpu_node.outputs.resize(gpu_op.output_ids.size());
      for (int j = 0; j < gpu_op.output_ids.size(); ++j) {
        int id = gpu_op.output_ids[j];
        if (id >= 0) {
          gpu_node.outputs[j] = id;
          tensor_usages[id] = i;
        } else {
          gpu_node.outputs[j] = mapping_to_global_ids[-(id + 1)];
        }
      }
      gpu_node.name = gpu_op.name;
      gpu_model->nodes.push_back(std::move(gpu_node));
    }
  }
  return absl::OkStatus();
}
// Fuses chains of linkable elementwise nodes in gpu_model->nodes. For every
// candidate root (a linkable node with 1-2 inputs and a single output) the
// pass locates the producer node(s) of its inputs and, when the topology and
// consumer counts allow, replaces producer(s) + root with a single fused
// node. The node vector is edited in place and `elem_root_index` is rewound
// after each fusion so newly formed nodes are reconsidered.
absl::Status MergeElementwiseNodes(const GpuInfo& gpu_info,
                                   GpuModel* gpu_model) {
  auto& nodes = gpu_model->nodes;
  for (int elem_root_index = 1; elem_root_index < nodes.size();
       ++elem_root_index) {
    auto& elem_root = nodes[elem_root_index];
    if (!(elem_root.inputs.size() == 1 || elem_root.inputs.size() == 2) ||
        elem_root.outputs.size() != 1 ||
        !elem_root.gpu_operation->IsLinkable()) {
      continue;
    }
    // prev_nodes maps each root input slot to the index of the most recent
    // earlier node producing that tensor.
    std::map<int, int> prev_nodes;
    for (int j = elem_root_index - 1; j >= 0; --j) {
      for (int k = 0; k < elem_root.inputs.size(); ++k) {
        if (elem_root.inputs[k] == nodes[j].outputs[0]) {
          prev_nodes[k] = j;
          break;
        }
      }
    }
    // Case 1: simple_elem -> simple_elem.
    if (prev_nodes.size() == 1) {
      if (elem_root.inputs.size() != 1) {
        continue;
      }
      const int prev_first_node_index = prev_nodes[0];
      auto& prev_node = nodes[prev_first_node_index];
      if (prev_node.inputs.size() != 1 || prev_node.outputs.size() != 1 ||
          !prev_node.gpu_operation->IsLinkable()) {
        continue;
      }
      // The intermediate tensor must have exactly one consumer (the root).
      int consumers_count = 0;
      for (const auto& node : nodes) {
        for (const auto& input : node.inputs) {
          if (input == elem_root.inputs[0]) {
            consumers_count++;
          }
        }
      }
      if (consumers_count != 1) {
        continue;
      }
      GPUOperation new_operation;
      RETURN_IF_ERROR(FuseSimpleElemWithSimpleElem(
          gpu_info, std::move(*prev_node.gpu_operation.get()),
          std::move(*elem_root.gpu_operation.get()), &new_operation));
      GpuNode new_node;
      new_node.inputs.push_back(prev_node.inputs[0]);
      new_node.outputs.push_back(elem_root.outputs[0]);
      new_node.name = prev_node.name + " -> " + elem_root.name;
      new_node.gpu_operation =
          std::make_unique<GPUOperation>(std::move(new_operation));
      nodes.erase(nodes.begin() + elem_root_index);
      nodes[prev_first_node_index] = std::move(new_node);
      elem_root_index = prev_first_node_index;
      continue;
    }
    if (prev_nodes.size() == 2) {
      if (elem_root.inputs.size() != 2 ||
          elem_root.gpu_operation->GetElementwiseInputsCount() != 2) {
        continue;
      }
      const int prev_first_node_index = prev_nodes[0];
      const int prev_second_node_index = prev_nodes[1];
      auto& prev_first_node = nodes[prev_first_node_index];
      auto& prev_second_node = nodes[prev_second_node_index];
      // Case 2a: first input is produced by a simple elementwise node that is
      // itself fed by the second input's producer.
      if (prev_first_node.gpu_operation->IsLinkable() &&
          !prev_second_node.gpu_operation->IsLinkable() &&
          prev_second_node.outputs.size() == 1 &&
          prev_first_node.inputs.size() == 1 &&
          prev_first_node.outputs.size() == 1) {
        int first_node_parent_index = -1;
        for (int j = prev_first_node_index - 1; j >= 0; --j) {
          if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
            first_node_parent_index = j;
            break;
          }
        }
        if (first_node_parent_index == -1 ||
            first_node_parent_index != prev_second_node_index) {
          continue;
        }
        int consumers_count = 0;
        for (const auto& node : nodes) {
          for (const auto& input : node.inputs) {
            if (input == elem_root.inputs[0]) {
              consumers_count++;
            }
          }
        }
        if (consumers_count != 1) {
          continue;
        }
        GPUOperation new_operation;
        RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsFirstInput(
            gpu_info, std::move(*prev_first_node.gpu_operation.get()),
            std::move(*elem_root.gpu_operation.get()), &new_operation));
        GpuNode new_node;
        new_node.inputs.push_back(prev_first_node.inputs[0]);
        new_node.outputs.push_back(elem_root.outputs[0]);
        new_node.name = prev_first_node.name + " -> " + elem_root.name;
        new_node.gpu_operation =
            std::make_unique<GPUOperation>(std::move(new_operation));
        nodes.erase(nodes.begin() + elem_root_index);
        nodes[prev_first_node_index] = std::move(new_node);
        elem_root_index = prev_first_node_index;
        continue;
      }
      // Case 2b: mirror of 2a with the simple elementwise producer feeding
      // the root's second input.
      if (!prev_first_node.gpu_operation->IsLinkable() &&
          prev_second_node.gpu_operation->IsLinkable() &&
          prev_first_node.outputs.size() == 1 &&
          prev_second_node.inputs.size() == 1 &&
          prev_second_node.outputs.size() == 1) {
        int second_node_parent_index = -1;
        for (int j = prev_second_node_index - 1; j >= 0; --j) {
          if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
            second_node_parent_index = j;
            break;
          }
        }
        if (second_node_parent_index == -1 ||
            second_node_parent_index != prev_first_node_index) {
          continue;
        }
        int consumers_count = 0;
        for (const auto& node : nodes) {
          for (const auto& input : node.inputs) {
            if (input == elem_root.inputs[1]) {
              consumers_count++;
            }
          }
        }
        if (consumers_count != 1) {
          continue;
        }
        GPUOperation new_operation;
        RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsSecondInput(
            gpu_info, std::move(*prev_second_node.gpu_operation.get()),
            std::move(*elem_root.gpu_operation.get()), &new_operation));
        GpuNode new_node;
        new_node.inputs.push_back(prev_second_node.inputs[0]);
        new_node.outputs.push_back(elem_root.outputs[0]);
        new_node.name = prev_second_node.name + " -> " + elem_root.name;
        new_node.gpu_operation =
            std::make_unique<GPUOperation>(std::move(new_operation));
        nodes.erase(nodes.begin() + elem_root_index);
        nodes[prev_second_node_index] = std::move(new_node);
        elem_root_index = prev_second_node_index;
        continue;
      }
      // Case 2c: both inputs come from simple elementwise nodes that share a
      // common parent (a diamond) — fuse all three into one node.
      if (prev_first_node.gpu_operation->IsLinkable() &&
          prev_second_node.gpu_operation->IsLinkable() &&
          prev_first_node.inputs.size() == 1 &&
          prev_first_node.outputs.size() == 1 &&
          prev_second_node.inputs.size() == 1 &&
          prev_second_node.outputs.size() == 1) {
        int first_node_parent_index = -1;
        for (int j = prev_first_node_index - 1; j >= 0; --j) {
          if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
            first_node_parent_index = j;
            break;
          }
        }
        int second_node_parent_index = -1;
        for (int j = prev_second_node_index - 1; j >= 0; --j) {
          if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
            second_node_parent_index = j;
            break;
          }
        }
        if (first_node_parent_index == -1 || second_node_parent_index == -1 ||
            first_node_parent_index != second_node_parent_index) {
          continue;
        }
        // Both intermediate tensors must have exactly one consumer each.
        int consumers_count = 0;
        for (const auto& node : nodes) {
          for (const auto& input : node.inputs) {
            if (input == elem_root.inputs[1]) {
              consumers_count++;
            }
          }
        }
        if (consumers_count != 1) {
          continue;
        }
        consumers_count = 0;
        for (const auto& node : nodes) {
          for (const auto& input : node.inputs) {
            if (input == elem_root.inputs[0]) {
              consumers_count++;
            }
          }
        }
        if (consumers_count != 1) {
          continue;
        }
        GPUOperation new_operation;
        RETURN_IF_ERROR(Fuse2InputElemWith2SimpleElem(
            gpu_info, std::move(*prev_first_node.gpu_operation.get()),
            std::move(*prev_second_node.gpu_operation.get()),
            std::move(*elem_root.gpu_operation.get()), &new_operation));
        GpuNode new_node;
        new_node.inputs.push_back(prev_first_node.inputs[0]);
        new_node.outputs.push_back(elem_root.outputs[0]);
        new_node.name = prev_first_node.name + " -> " + prev_second_node.name +
                        " -> " + elem_root.name;
        new_node.gpu_operation =
            std::make_unique<GPUOperation>(std::move(new_operation));
        // Erase from the back so the smaller index stays valid.
        int first_prev_node_index =
            std::min(prev_first_node_index, prev_second_node_index);
        int second_prev_node_index =
            std::max(prev_first_node_index, prev_second_node_index);
        nodes.erase(nodes.begin() + elem_root_index);
        nodes.erase(nodes.begin() + second_prev_node_index);
        nodes[first_prev_node_index] = std::move(new_node);
        elem_root_index = first_prev_node_index - 1;
        continue;
      }
    }
  }
  return absl::OkStatus();
}
// Merges each single-output node with its unique linkable consumer when that
// consumer reads the node's output as its first input and all of the
// consumer's other inputs are already available. The merged node replaces the
// producer in place and the consumer is erased; `i` is stepped back so the
// merged node is re-examined.
absl::Status MergeNodes(const GpuInfo& gpu_info, GpuModel* gpu_model) {
  absl::flat_hash_set<ValueId> ready_tensors;
  absl::flat_hash_set<ValueId> output_tensors;
  for (const auto& input : gpu_model->input_ids_and_refs) {
    ready_tensors.insert(input.first);
  }
  for (const auto& output : gpu_model->output_ids_and_refs) {
    output_tensors.insert(output.first);
  }
  auto& nodes = gpu_model->nodes;
  for (int i = 0; i < nodes.size(); ++i) {
    auto& node = nodes[i];
    // Nodes that produce a graph output must not be merged away.
    bool node_has_graph_output = false;
    for (const auto& out_id : node.outputs) {
      ready_tensors.insert(out_id);
      if (output_tensors.find(out_id) != output_tensors.end()) {
        node_has_graph_output = true;
      }
    }
    if (node_has_graph_output || node.outputs.size() != 1) {
      continue;
    }
    // Find every later node consuming this node's single output; remember in
    // which input slot the last such consumer reads it.
    std::vector<int> next_nodes;
    int link_index = 0;
    for (int j = i + 1; j < nodes.size(); ++j) {
      for (int k = 0; k < nodes[j].inputs.size(); ++k) {
        if (nodes[j].inputs[k] == node.outputs[0]) {
          next_nodes.push_back(j);
          link_index = k;
        }
      }
    }
    // Merge only when there is exactly one consumer and it links via slot 0.
    if (next_nodes.size() != 1 || link_index != 0) {
      continue;
    }
    auto& linkable_node = nodes[next_nodes[0]];
    if (!linkable_node.gpu_operation->IsLinkable() ||
        linkable_node.outputs.size() != 1 ||
        !IsReady(ready_tensors, linkable_node)) {
      continue;
    }
    RETURN_IF_ERROR(MergeGpuNodes(gpu_info, &linkable_node, &node));
    nodes.erase(nodes.begin() + next_nodes[0]);
    // Re-examine the merged node: it may now link with a further consumer.
    i -= 1;
  }
  return absl::OkStatus();
}
// Records the graph's input, variable-input and output tensors in the model
// as (value id, tensor ref) pairs, in the graph's own order.
void CopyExternals(const GraphFloat32& graph, GpuModel* gpu_model) {
  for (const auto& value : graph.inputs()) {
    gpu_model->input_ids_and_refs.push_back({value->id, value->tensor.ref});
  }
  for (const auto& value : graph.variable_inputs()) {
    gpu_model->variable_ids_and_refs.push_back({value->id, value->tensor.ref});
  }
  for (const auto& value : graph.outputs()) {
    gpu_model->output_ids_and_refs.push_back({value->id, value->tensor.ref});
  }
}
// Drops every entry from gpu_model->tensors that is referenced neither by a
// node (as input or output) nor by the model's external inputs/outputs.
void RemoveUnusedTensors(GpuModel* gpu_model) {
  absl::flat_hash_set<ValueId> referenced;
  for (const auto& node : gpu_model->nodes) {
    referenced.insert(node.inputs.begin(), node.inputs.end());
    referenced.insert(node.outputs.begin(), node.outputs.end());
  }
  for (const auto& id_and_ref : gpu_model->input_ids_and_refs) {
    referenced.insert(id_and_ref.first);
  }
  for (const auto& id_and_ref : gpu_model->output_ids_and_refs) {
    referenced.insert(id_and_ref.first);
  }
  for (auto it = gpu_model->tensors.begin(); it != gpu_model->tensors.end();) {
    if (referenced.find(it->first) != referenced.end()) {
      ++it;
    } else {
      gpu_model->tensors.erase(it++);
    }
  }
}
// Binds every node's src/dst tensor arguments using lightweight dummy spatial
// tensors built from the reserved descriptors, then lets each operation bind
// its arguments and recompute its grid size. The dummy tensors carry only
// shape/descriptor info; they live on the stack for the duration of the bind.
absl::Status ResolvePolymorphicArgs(GpuModel* gpu_model) {
  // Minimal GpuSpatialTensor implementation: exposes shape and descriptor,
  // no backing device memory.
  class DummySpatialTensor : public GpuSpatialTensor {
   public:
    DummySpatialTensor() = default;
    explicit DummySpatialTensor(const BHWDC& shape,
                                const TensorDescriptor& tensor_desc)
        : shape_(shape), tensor_desc_(tensor_desc) {}
    ~DummySpatialTensor() override = default;
    int Width() const override { return shape_.w; }
    int Height() const override { return shape_.h; }
    int Depth() const override { return shape_.d; }
    int Channels() const override { return shape_.c; }
    int Slices() const override { return DivideRoundUp(shape_.c, 4); }
    int Batch() const override { return shape_.b; }
    TensorDescriptor GetDescriptor() const override { return tensor_desc_; }
   private:
    BHWDC shape_;
    TensorDescriptor tensor_desc_;
  };
  for (auto& node : gpu_model->nodes) {
    // Pointers handed to SetSrc/SetDst stay valid until BindArguments below,
    // because src_tensors/dst_tensors outlive the calls in this iteration.
    std::vector<DummySpatialTensor> src_tensors(node.inputs.size());
    for (int i = 0; i < node.inputs.size(); ++i) {
      const auto& tensor_desc = gpu_model->tensors[node.inputs[i]];
      src_tensors[i] =
          DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
      node.gpu_operation->SetSrc(&src_tensors[i], i);
    }
    std::vector<DummySpatialTensor> dst_tensors(node.outputs.size());
    for (int i = 0; i < node.outputs.size(); ++i) {
      const auto& tensor_desc = gpu_model->tensors[node.outputs[i]];
      dst_tensors[i] =
          DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
      node.gpu_operation->SetDst(&dst_tensors[i], i);
    }
    RETURN_IF_ERROR(
        node.gpu_operation->BindArguments(&node.gpu_operation->args_));
    node.gpu_operation->RecalculateGridSize();
  }
  return absl::OkStatus();
}
}
// Converts a GraphFloat32 into an executable GpuModel.  Pipeline order
// matters: tensors are reserved first, operations converted, then
// elementwise/linkable nodes are fused, unused tensors pruned, and finally
// code is assembled and shape-dependent arguments resolved.
absl::Status GraphToGpuModel(const GraphFloat32& graph,
                             const CreateGpuModelInfo& create_info,
                             const GpuInfo& gpu_info, GpuModel* gpu_model) {
  TensorReserver tensor_reserver;
  RETURN_IF_ERROR(
      ReserveGraphTensors(create_info, gpu_info, graph, &tensor_reserver));
  CopyExternals(graph, gpu_model);
  RETURN_IF_ERROR(ConvertOperations(gpu_info, graph, create_info,
                                    &tensor_reserver, gpu_model));
  RETURN_IF_ERROR(MergeElementwiseNodes(gpu_info, gpu_model));
  RETURN_IF_ERROR(MergeNodes(gpu_info, gpu_model));
  // The reserver's map is consumed here; it must not be used afterwards.
  gpu_model->tensors = std::move(tensor_reserver.reservations_);
  RemoveUnusedTensors(gpu_model);
  for (auto& node : gpu_model->nodes) {
    RETURN_IF_ERROR(node.gpu_operation->AssembleCode(gpu_info));
  }
  return ResolvePolymorphicArgs(gpu_model);
}
flatbuffers::Offset<data::GpuModel> Encode(
const GpuModel& gpu_model, flatbuffers::FlatBufferBuilder* builder) {
std::vector<int32_t> in_ids(gpu_model.input_ids_and_refs.size());
std::vector<int64_t> in_refs(gpu_model.input_ids_and_refs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = gpu_model.input_ids_and_refs[i].first;
in_refs[i] = gpu_model.input_ids_and_refs[i].second;
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto in_refs_fb = builder->CreateVector(in_refs);
std::vector<int32_t> out_ids(gpu_model.output_ids_and_refs.size());
std::vector<int64_t> out_refs(gpu_model.output_ids_and_refs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = gpu_model.output_ids_and_refs[i].first;
out_refs[i] = gpu_model.output_ids_and_refs[i].second;
}
auto out_ids_fb = builder->CreateVector(out_ids);
auto out_refs_fb = builder->CreateVector(out_refs);
std::vector<flatbuffers::Offset<data::GpuNode>> nodes_fb;
for (int i = 0; i < gpu_model.nodes.size(); ++i) {
auto node_fb = Encode(gpu_model.nodes[i], builder);
nodes_fb.push_back(node_fb);
}
auto nodes_fb_vec = builder->CreateVector(nodes_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> tensors_fb;
for (const auto& tensor : gpu_model.tensors) {
auto tensor_fb = Encode(tensor.second, tensor.first, builder);
tensors_fb.push_back(tensor_fb);
}
auto tensors_fb_vec = builder->CreateVector(tensors_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> const_tensors_fb;
for (const auto& tensor : gp | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
// End-to-end linking tests: each case builds a small graph via a
// gpu_model_test_util helper, runs it through the OpenCL execution
// environment, and asserts the resulting status is OK.
TEST_F(OpenCLOperationTest, LinkingConvolutionAndCosOp) {
  auto status = TestLinkingConvolutionAndCosOp(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMul) {
  auto status = TestLinkingConvolution2InputMul2InputMul(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputBroadcastMul2InputMul) {
  auto status = TestLinkingConvolution2InputBroadcastMul2InputMul(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputBroadcastMul) {
  auto status = TestLinkingConvolution2InputMul2InputBroadcastMul(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMulCos) {
  auto status = TestLinkingConvolution2InputMul2InputMulCos(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanh2InputDiff) {
  auto status = TestLinkingConvolutionFirstTanh2InputDiff(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionSecondTanh2InputDiff) {
  auto status = TestLinkingConvolutionSecondTanh2InputDiff(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanhSecondCos2InputDiff) {
  auto status = TestLinkingConvolutionFirstTanhSecondCos2InputDiff(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingComplex0) {
  auto status = TestLinkingComplex0(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvElem2InputAddElemsOp) {
  auto status = TestLinkingConvElem2InputAddElemsOp(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingSliceCastOp) {
  auto status = TestLinkingSliceCastOp(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
// The boolean flag toggles the extra Add in the helper's graph.
TEST_F(OpenCLOperationTest, LinkingAddAddMulOp) {
  auto status = TestLinkingAddAddMulOp(&exec_env_,
                                       true);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddMulOp) {
  auto status =
      TestLinkingAddAddMulOp(&exec_env_, false);
  ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | absl::Status MergeGpuNodes(const GpuInfo& gpu_info, GpuNode* src,
GpuNode* dst) {
for (int j = 1; j < src->inputs.size(); ++j) {
dst->inputs.push_back(src->inputs[j]);
}
dst->outputs[0] = src->outputs[0];
dst->name += " -> " + src->name;
return dst->gpu_operation->AddOperation(gpu_info, src->gpu_operation.get());
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, LinkingConvolutionAndCosOp) {
auto status = TestLinkingConvolutionAndCosOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMul) {
auto status = TestLinkingConvolution2InputMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputBroadcastMul2InputMul) {
auto status = TestLinkingConvolution2InputBroadcastMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputBroadcastMul) {
auto status = TestLinkingConvolution2InputMul2InputBroadcastMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMulCos) {
auto status = TestLinkingConvolution2InputMul2InputMulCos(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanh2InputDiff) {
auto status = TestLinkingConvolutionFirstTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionSecondTanh2InputDiff) {
auto status = TestLinkingConvolutionSecondTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanhSecondCos2InputDiff) {
auto status = TestLinkingConvolutionFirstTanhSecondCos2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingComplex0) {
auto status = TestLinkingComplex0(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvElem2InputAddElemsOp) {
auto status = TestLinkingConvElem2InputAddElemsOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingSliceCastOp) {
auto status = TestLinkingSliceCastOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddAddMulOp) {
auto status = TestLinkingAddAddMulOp(&exec_env_,
true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddMulOp) {
auto status =
TestLinkingAddAddMulOp(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
} |
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace spmd {
// Partitions rng-get-and-update-state by cloning it (operand-free) into each
// partition: sharding must be either a unique device or replicated.
absl::Status StatefulRngSpmdPartitioningVisitor::HandleRngGetAndUpdateState(
    HloInstruction* hlo) {
  if (hlo->sharding().HasUniqueDevice()) {
    return HandleSingleDevice(hlo);
  }
  // Only replicated sharding is supported beyond the single-device case.
  TF_RET_CHECK(hlo->sharding().IsReplicated());
  auto clone =
      builder()->AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), {}));
  clone->set_sharding(hlo->sharding());
  SetPartitionedHlo(
      hlo, spmd::PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
               .Reshard(hlo->sharding()));
  return absl::OkStatus();
}
// Factory override: makes the base partitioner use the stateful-RNG-aware
// visitor for every computation it partitions.
std::unique_ptr<spmd::SpmdPartitioningVisitor>
StatefulRngSpmdPartitioner::CreateVisitor(
    HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
    const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
    int64_t* next_channel_id, spmd::SpmdLogger* logger,
    spmd::SpmdPartitionerOptions options, const CallGraph& call_graph) {
  return std::make_unique<StatefulRngSpmdPartitioningVisitor>(
      computation, num_partitions, num_replicas, collective_ops_creator,
      next_channel_id, logger, std::move(options), this, call_graph);
}
// Assigns a replicated sharding to every rng-get-and-update-state instruction
// that lacks one, then delegates to the base partitioner's preprocessing.
absl::Status StatefulRngSpmdPartitioner::PreprocessSharding(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  for (HloComputation* comp : module->computations(execution_threads)) {
    for (HloInstruction* instr : comp->instructions()) {
      const bool unsharded_rng_state =
          instr->opcode() == HloOpcode::kRngGetAndUpdateState &&
          !instr->has_sharding();
      if (unsharded_rng_state) {
        instr->set_sharding(HloSharding::Replicate());
      }
    }
  }
  return spmd::SpmdPartitioner::PreprocessSharding(module, execution_threads);
}
// Rng state updates are allowed to keep a replicated sharding; every other
// side-effecting op defers to the base partitioner's policy.
bool StatefulRngSpmdPartitioner::CanSideEffectingHaveReplicatedSharding(
    const HloInstruction* hlo) {
  return hlo->opcode() == HloOpcode::kRngGetAndUpdateState ||
         spmd::SpmdPartitioner::CanSideEffectingHaveReplicatedSharding(hlo);
}
// For while-body computations, optionally tags the enclosing while loop with
// the "is_pipelined_while_loop" frontend attribute when the (unsafe) pipelined
// loop annotator debug option is enabled.
// NOTE(review): despite the name, no rotate-right rewrite happens here —
// presumably the annotation is consumed by a later pass; confirm.
absl::Status StatefulRngSpmdPartitioner::HandleRotateRightWhilePreprocessing(
    HloComputation* computation) {
  if (!computation->IsWhileBodyComputation()) {
    return absl::OkStatus();
  }
  HloInstruction* while_loop = computation->WhileCallInstruction();
  TF_RET_CHECK(while_loop);
  if (computation->parent()
          ->config()
          .debug_options()
          .xla_gpu_unsafe_pipelined_loop_annotator()) {
    xla::FrontendAttributes attributes;
    (*attributes.mutable_map())["is_pipelined_while_loop"] = "true";
    while_loop->add_frontend_attributes(attributes);
  }
  return absl::OkStatus();
}
}
} | #include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
// Returns how many instructions in `computation` have the given opcode.
int64_t CountInstructions(const HloComputation &computation, HloOpcode opcode) {
  int64_t total = 0;
  for (const auto &instr : computation.instructions()) {
    if (instr->opcode() == opcode) ++total;
  }
  return total;
}
// Test fixture that runs an HLO module through a small pipeline:
// (optional extra passes) -> sharding propagation ->
// StatefulRngSpmdPartitioner, with verification before and after.
class StatefulRngSpmdPartitionerTest : public HloTestBase {
 public:
  // Parses `hlo_module`, partitions it across `num_partitions`, and returns
  // the transformed module.  `add_passes` lets a test prepend passes (e.g.
  // RngExpander); the two trailing booleans are forwarded to the partitioner.
  absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
      absl::string_view hlo_module, int64_t num_partitions,
      DebugOptions debug_options,
      std::function<void(HloPassPipeline &pipeline)> add_passes = nullptr,
      bool skip_checking_windowed_einsum_users = false,
      bool disable_ag_rewrite_for_multiple_consumers = false) {
    HloModuleConfig config = GetModuleConfigForTest(1, num_partitions);
    config.set_use_spmd_partitioning(true);
    config.set_debug_options(debug_options);
    TF_ASSIGN_OR_RETURN(auto module,
                        ParseAndReturnVerifiedModule(hlo_module, config));
    HloPassPipeline pass("partitioning");
    pass.AddPass<HloVerifier>(false,
                              false);
    if (add_passes) {
      add_passes(pass);
    }
    pass.AddPass<ShardingPropagation>(true);
    pass.AddPass<StatefulRngSpmdPartitioner>(
        num_partitions,
        1,
        debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
        debug_options.xla_gpu_multi_streamed_windowed_einsum(),
        skip_checking_windowed_einsum_users,
        disable_ag_rewrite_for_multiple_consumers);
    pass.AddPass<HloVerifier>(false,
                              false);
    TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }

  // Asserts the partitioned module contains no all-reduce anywhere.
  void VerifyNoAllReduce(HloModule *module) {
    for (HloComputation *computation : module->computations()) {
      for (HloInstruction *hlo : computation->instructions()) {
        EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce);
      }
    }
  }

  // Debug options that disable windowed einsum and loop annotation so tests
  // opt in explicitly.
  DebugOptions GetDefaultDebugOptions() {
    DebugOptions debug_options = GetDebugOptionsForTest();
    debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(1000000);
    debug_options.set_xla_gpu_multi_streamed_windowed_einsum(false);
    debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
    return debug_options;
  }
};
TEST_F(StatefulRngSpmdPartitionerTest, RngReplicatedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={replicated}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
DebugOptions debug_options = GetDebugOptionsForTest();
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest, RngPartitionedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={devices=[2,1]0,1}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest,
EinsumDisableRewriteForAgWithMultipleConsumers) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(bf16[2,2048,24576]{2,1,0}, bf16[24576,98304]{1,0}, bf16[24576,98304]{1,0})->bf16[2,2048,98304]{2,1,0}}, num_partitions=4
ENTRY main {
Arg_0.1 = bf16[2,2048,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
Arg_1.2 = bf16[24576,98304]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
dot.5 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
Arg_2.3 = bf16[24576,98304]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
dot.6 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
ROOT add.8 = bf16[2,2048,98304]{2,1,0} add(dot.5, dot.6), sharding={devices=[1,1,4]<=[4]}
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(0);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options,
nullptr,
true,
true));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kWhile),
1);
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kDot),
1);
EXPECT_EQ(
CountInstructions(*module->entry_computation(), HloOpcode::kAllGather),
1);
}
TEST_F(StatefulRngSpmdPartitionerTest, VerifyThresholdSetCorrectly) {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
int64_t threshold = 400;
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(threshold);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
StatefulRngSpmdPartitioner rng_spmd_partitioner(
2, 1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum());
EXPECT_EQ(rng_spmd_partitioner.options().threshold_for_windowed_einsum_mib,
threshold);
EXPECT_EQ(rng_spmd_partitioner.options().unroll_windowed_einsum, true);
}
TEST_F(StatefulRngSpmdPartitionerTest,
MergedSliceThenConcatRotateRightWhileOp) {
absl::string_view hlo_string = R"(
HloModule test
%Body {
%param = (f32[12], s32[]) parameter(0)
%i = s32[] get-tuple-element(%param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%param0 = f32[12] get-tuple-element(%param), index=0, sharding={devices=[4]<=[4]}
%slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]}
%slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]}
%concat = f32[12] concatenate(%slice0, %slice1), dimensions={0}, sharding={devices=[4]<=[4]}
ROOT %tuple = (f32[12], s32[]) tuple(%concat, %i_plus_one)
}
%Cond {
%param.1 = (f32[12], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element(%param.1), index=1
%trip_count = s32[] constant(11)
ROOT %done = pred[] compare(%i.1, %trip_count), direction=LT
}
ENTRY %test {
%i_start = f32[12] parameter(0)
%p_start = s32[] constant(0)
%initial_tuple = (f32[12], s32[]) tuple(%i_start, %p_start)
ROOT %while = (f32[12], s32[]) while(%initial_tuple), condition=%Cond, body=%Body
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options));
const HloInstruction *whileOp =
module->entry_computation()->root_instruction();
const HloInstruction *root =
whileOp->while_body()->GetInstructionWithName("concatenate");
auto rotate =
op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
EXPECT_TRUE(
whileOp->frontend_attributes().map().contains("is_pipelined_while_loop"));
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
TF_ASSERT_OK_AND_ASSIGN(
module,
PartitionComputation(hlo_string, 4, debug_options));
whileOp = module->entry_computation()->root_instruction();
root = whileOp->while_body()->GetInstructionWithName("concatenate");
rotate = op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
}
}
}
} | std::unique_ptr<spmd::SpmdPartitioningVisitor>
StatefulRngSpmdPartitioner::CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, spmd::SpmdLogger* logger,
spmd::SpmdPartitionerOptions options, const CallGraph& call_graph) {
return std::make_unique<StatefulRngSpmdPartitioningVisitor>(
computation, num_partitions, num_replicas, collective_ops_creator,
next_channel_id, logger, std::move(options), this, call_graph);
} | TEST_F(StatefulRngSpmdPartitionerTest, VerifyThresholdSetCorrectly) {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
int64_t threshold = 400;
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(threshold);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
StatefulRngSpmdPartitioner rng_spmd_partitioner(
2, 1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum());
EXPECT_EQ(rng_spmd_partitioner.options().threshold_for_windowed_einsum_mib,
threshold);
EXPECT_EQ(rng_spmd_partitioner.options().unroll_windowed_einsum, true);
} |
#include "tsl/platform/cloud/curl_http_request.h"
#include <algorithm>
#include "xla/tsl/util/env_var.h"
#include "tsl/lib/gtl/map_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/types.h"
#define CHECK_CURL_OK(expr) CHECK_EQ(expr, CURLE_OK)
namespace tsl {
namespace {
constexpr uint64 kVerboseOutput = 0;
// LibCurl implementation that forwards every call to the real libcurl.
// Load() performs the process-wide curl_global_init exactly once.
class LibCurlProxy : public LibCurl {
 public:
  // Returns a process-wide singleton; first call initializes libcurl.
  static LibCurlProxy* Load() {
    static LibCurlProxy* libcurl = []() -> LibCurlProxy* {
      curl_global_init(CURL_GLOBAL_ALL);
      return new LibCurlProxy;
    }();
    return libcurl;
  }
  CURL* curl_easy_init() override { return ::curl_easy_init(); }
  // The setopt overloads mirror the parameter kinds CurlHttpRequest uses:
  // integral, string, opaque pointer, and the various callback signatures.
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            uint64 param) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            const char* param) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            void* param) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            size_t (*param)(void*, size_t, size_t,
                                            FILE*)) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            size_t (*param)(const void*, size_t, size_t,
                                            void*)) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            int (*param)(void* clientp, curl_off_t dltotal,
                                         curl_off_t dlnow, curl_off_t ultotal,
                                         curl_off_t ulnow)) override {
    return ::curl_easy_setopt(curl, option, param);
  }
  CURLcode curl_easy_perform(CURL* curl) override {
    return ::curl_easy_perform(curl);
  }
  CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
                             uint64* value) override {
    return ::curl_easy_getinfo(curl, info, value);
  }
  CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
                             double* value) override {
    return ::curl_easy_getinfo(curl, info, value);
  }
  void curl_easy_cleanup(CURL* curl) override {
    return ::curl_easy_cleanup(curl);
  }
  char* curl_easy_escape(CURL* curl, const char* str, int length) override {
    return ::curl_easy_escape(curl, str, length);
  }
  curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
    return ::curl_slist_append(list, str);
  }
  void curl_slist_free_all(curl_slist* list) override {
    return ::curl_slist_free_all(list);
  }
  void curl_free(void* p) override { ::curl_free(p); }
};
}
// Default constructor: uses the real-libcurl proxy.
CurlHttpRequest::CurlHttpRequest() : CurlHttpRequest(LibCurlProxy::Load()) {}

// Creates a curl easy handle and applies baseline options (CA bundle from
// CURL_CA_BUNDLE if set, HTTP/1.1, no signals, progress callback for
// inactivity tracking).  CHECK-fails if the handle cannot be created.
CurlHttpRequest::CurlHttpRequest(LibCurl* libcurl, Env* env)
    : libcurl_(libcurl), env_(env) {
  default_response_buffer_.reserve(CURL_MAX_WRITE_SIZE);
  curl_ = libcurl_->curl_easy_init();
  CHECK(curl_ != nullptr) << "Couldn't initialize a curl session.";
  // Honor an explicit CA bundle override from the environment.
  std::string value = "";
  TF_CHECK_OK(ReadStringFromEnvVar("CURL_CA_BUNDLE", "", &value));
  if (!value.empty()) {
    CHECK_CURL_OK(
        libcurl_->curl_easy_setopt(curl_, CURLOPT_CAINFO, value.c_str()));
  }
  CHECK_CURL_OK(
      libcurl_->curl_easy_setopt(curl_, CURLOPT_VERBOSE, kVerboseOutput));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_USERAGENT, "TSL"));
  // NOSIGNAL avoids SIGPIPE/alarm-based timeouts in multithreaded programs.
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_NOSIGNAL, 1L));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTP_VERSION,
                                           CURL_HTTP_VERSION_1_1));
  // Enable progress reporting so ProgressCallback can enforce inactivity
  // timeouts.
  CHECK_CURL_OK(
      libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, uint64{0}));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFODATA, this));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFOFUNCTION,
                                           &CurlHttpRequest::ProgressCallback));
  SetResultBuffer(&default_response_buffer_);
}
// Releases all libcurl resources owned by this request: header and resolve
// lists, any open PUT body file, and the easy handle itself.
CurlHttpRequest::~CurlHttpRequest() {
  if (curl_headers_) {
    libcurl_->curl_slist_free_all(curl_headers_);
  }
  if (resolve_list_) {
    libcurl_->curl_slist_free_all(resolve_list_);
  }
  if (put_body_) {
    // Best-effort close; failure is logged but not propagated.
    if (fclose(put_body_) != 0) {
      LOG(ERROR) << "fclose() failed: " << strerror(errno);
    }
  }
  if (curl_) {
    libcurl_->curl_easy_cleanup(curl_);
  }
}
// URL-escapes `str` using libcurl; the length of 0 tells libcurl to use
// strlen on the input.
string CurlHttpRequest::EscapeString(const string& str) {
  char* escaped = libcurl_->curl_easy_escape(curl_, str.c_str(), 0);
  string result(escaped);
  libcurl_->curl_free(escaped);
  return result;
}
// Sets the request URI; must be called before Send().
void CurlHttpRequest::SetUri(const string& uri) {
  CheckNotSent();
  is_uri_set_ = true;
  // Keep a copy for stats/error reporting; libcurl copies the string too.
  uri_ = uri;
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_URL, uri.c_str()));
}
// Requests the inclusive byte range [start, end] via the HTTP Range header.
void CurlHttpRequest::SetRange(uint64 start, uint64 end) {
  CheckNotSent();
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(
      curl_, CURLOPT_RANGE, strings::StrCat(start, "-", end).c_str()));
}
// Appends a "name: value" header to the outgoing request.
void CurlHttpRequest::AddHeader(const string& name, const string& value) {
  CheckNotSent();
  curl_headers_ = libcurl_->curl_slist_append(
      curl_headers_, strings::StrCat(name, ": ", value).c_str());
}
// Forces libcurl to resolve `hostname:port` to `ip_addr` (CURLOPT_RESOLVE
// format "host:port:address"), bypassing DNS for that endpoint.
void CurlHttpRequest::AddResolveOverride(const string& hostname, int64_t port,
                                         const string& ip_addr) {
  CheckNotSent();
  resolve_list_ = libcurl_->curl_slist_append(
      resolve_list_,
      strings::StrCat(hostname, ":", port, ":", ip_addr).c_str());
}
// Adds an OAuth bearer Authorization header; an empty token adds nothing.
void CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) {
  CheckNotSent();
  if (auth_token.empty()) return;
  AddHeader("Authorization", strings::StrCat("Bearer ", auth_token));
}
// Installs a stats collector notified on Send(); may be set at most once.
void CurlHttpRequest::SetRequestStats(RequestStats* stats) {
  CheckNotSent();
  CHECK(stats_ == nullptr) << "SetRequestStats already called";
  stats_ = stats;
}
// Configures the request as an HTTP DELETE.  The method may be set only once.
void CurlHttpRequest::SetDeleteRequest() {
  CheckNotSent();
  CheckMethodNotSet();
  is_method_set_ = true;
  method_ = RequestMethod::kDelete;
  CHECK_CURL_OK(
      libcurl_->curl_easy_setopt(curl_, CURLOPT_CUSTOMREQUEST, "DELETE"));
}
// Configures the request as an HTTP PUT whose body is the contents of
// `body_filepath` starting at byte `offset`.  Returns InvalidArgument if the
// file cannot be opened; any previously configured body file is closed first.
Status CurlHttpRequest::SetPutFromFile(const string& body_filepath,
                                       size_t offset) {
  CheckNotSent();
  CheckMethodNotSet();
  is_method_set_ = true;
  method_ = RequestMethod::kPut;
  if (put_body_) {
    // Best-effort close of a previously configured body; failure only logged.
    if (fclose(put_body_) != 0) {
      LOG(ERROR) << "fclose() failed: " << strerror(errno);
    }
  }
  put_body_ = fopen(body_filepath.c_str(), "r");
  if (!put_body_) {
    return errors::InvalidArgument("Couldn't open the specified file: " +
                                   body_filepath);
  }
  // Measure the remaining size past `offset`, then rewind to `offset`.
  // NOTE(review): assumes offset <= file size; otherwise `size` goes
  // negative/underflows — confirm callers guarantee this.
  fseek(put_body_, 0, SEEK_END);
  const auto size = ftell(put_body_) - offset;
  fseek(put_body_, offset, SEEK_SET);
  curl_headers_ = libcurl_->curl_slist_append(
      curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
  // libcurl's default read callback fread()s from this FILE*.
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
                                           reinterpret_cast<void*>(put_body_)));
  return OkStatus();
}
// Configures the request as an HTTP PUT with a zero-length body.
void CurlHttpRequest::SetPutEmptyBody() {
  CheckNotSent();
  CheckMethodNotSet();
  is_method_set_ = true;
  method_ = RequestMethod::kPut;
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
  AddHeader("Content-Length", "0");
  AddHeader("Transfer-Encoding", "identity");
  // ReadCallback reads from post_body_buffer_, which is empty here, so the
  // upload ends immediately.
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
                                           &CurlHttpRequest::ReadCallback));
}
// Configures the request as an HTTP POST whose body is the caller-owned
// buffer [buffer, buffer + size).  The buffer must outlive Send(): only a
// view is stored here.
void CurlHttpRequest::SetPostFromBuffer(const char* buffer, size_t size) {
  CheckNotSent();
  CheckMethodNotSet();
  is_method_set_ = true;
  method_ = RequestMethod::kPost;
  curl_headers_ = libcurl_->curl_slist_append(
      curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
                                           &CurlHttpRequest::ReadCallback));
  post_body_buffer_ = StringPiece(buffer, size);
}
// Configures the request as an HTTP POST with a zero-length body.
void CurlHttpRequest::SetPostEmptyBody() {
  CheckNotSent();
  CheckMethodNotSet();
  is_method_set_ = true;
  method_ = RequestMethod::kPost;
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
  AddHeader("Content-Length", "0");
  AddHeader("Transfer-Encoding", "identity");
  // post_body_buffer_ is empty, so ReadCallback yields no data.
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
                                           &CurlHttpRequest::ReadCallback));
}
// Directs the response body into `out_buffer` (cleared first).  The buffer
// must outlive Send().
void CurlHttpRequest::SetResultBuffer(std::vector<char>* out_buffer) {
  CheckNotSent();
  CHECK(out_buffer != nullptr);
  out_buffer->clear();
  response_buffer_ = out_buffer;
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEFUNCTION,
                                           &CurlHttpRequest::WriteCallback));
}
// Directs the response body into a caller-owned fixed-size buffer, avoiding
// the intermediate std::vector; excess bytes beyond `size` are dropped by
// WriteCallbackDirect.
void CurlHttpRequest::SetResultBufferDirect(char* buffer, size_t size) {
  CHECK(buffer != nullptr);
  CheckNotSent();
  direct_response_ = DirectResponseState{buffer, size, 0, 0};
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(
      curl_, CURLOPT_WRITEFUNCTION, &CurlHttpRequest::WriteCallbackDirect));
}
// True when SetResultBufferDirect() installed a caller-owned buffer.
bool CurlHttpRequest::IsDirectResponse() const {
  return nullptr != direct_response_.buffer_;
}
// libcurl write callback for direct-buffer mode: copies as much of the
// incoming chunk as fits into the user buffer and tracks both how many bytes
// were stored and how many arrived in total.
size_t CurlHttpRequest::WriteCallbackDirect(const void* ptr, size_t size,
                                            size_t nmemb, void* userdata) {
  CHECK(ptr != nullptr);
  auto that = reinterpret_cast<CurlHttpRequest*>(userdata);
  DirectResponseState* state = &that->direct_response_;
  CHECK(state->buffer_ != nullptr);
  CHECK(state->bytes_transferred_ <= state->buffer_size_);
  size_t curl_bytes_received = size * nmemb;
  size_t user_buffer_bytes_available =
      state->buffer_size_ - state->bytes_transferred_;
  size_t bytes_to_copy =
      std::min<size_t>(curl_bytes_received, user_buffer_bytes_available);
  memcpy(&state->buffer_[state->bytes_transferred_], ptr, bytes_to_copy);
  state->bytes_transferred_ += bytes_to_copy;
  state->bytes_received_ += curl_bytes_received;
  // Returning fewer bytes than delivered makes libcurl stop the transfer
  // once the caller's buffer is full.
  return bytes_to_copy;
}
// Returns how many bytes were actually stored in the direct buffer; only
// valid after SetResultBufferDirect() was used.
size_t CurlHttpRequest::GetResultBufferDirectBytesTransferred() {
  CHECK(direct_response_.buffer_ != nullptr);
  return direct_response_.bytes_transferred_;
}
// Sets connection, inactivity, and total-request timeouts (seconds); applied
// to the curl handle when Send() runs.
void CurlHttpRequest::SetTimeouts(uint32 connection, uint32 inactivity,
                                  uint32 total) {
  CheckNotSent();
  connect_timeout_secs_ = connection;
  inactivity_timeout_secs_ = inactivity;
  request_timeout_secs_ = total;
}
// libcurl write callback for vector-buffer mode: appends the incoming chunk
// to the configured response buffer.
size_t CurlHttpRequest::WriteCallback(const void* ptr, size_t size,
                                      size_t nmemb, void* this_object) {
  CHECK(ptr);
  auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
  CHECK(that->response_buffer_);
  const size_t bytes_to_copy = size * nmemb;
  that->response_buffer_->insert(
      that->response_buffer_->end(), reinterpret_cast<const char*>(ptr),
      reinterpret_cast<const char*>(ptr) + bytes_to_copy);
  return bytes_to_copy;
}
// libcurl read callback for in-memory POST/PUT bodies: copies the next chunk
// of post_body_buffer_ into libcurl's upload buffer, advancing a cursor so
// successive calls stream the body.
size_t CurlHttpRequest::ReadCallback(void* ptr, size_t size, size_t nmemb,
                                     FILE* this_object) {
  CHECK(ptr);
  // The FILE* parameter type is libcurl's default signature; READDATA was
  // actually set to `this`.
  auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
  CHECK(that->post_body_read_ <= that->post_body_buffer_.size());
  const size_t bytes_to_copy = std::min(
      size * nmemb, that->post_body_buffer_.size() - that->post_body_read_);
  memcpy(ptr, that->post_body_buffer_.data() + that->post_body_read_,
         bytes_to_copy);
  that->post_body_read_ += bytes_to_copy;
  return bytes_to_copy;
}
// libcurl header callback: parses "Name: value" response headers into
// response_headers_; non-matching lines (status line, blank line) are
// ignored.
size_t CurlHttpRequest::HeaderCallback(const void* ptr, size_t size,
                                       size_t nmemb, void* this_object) {
  CHECK(ptr);
  auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
  StringPiece header(reinterpret_cast<const char*>(ptr), size * nmemb);
  StringPiece name, value;
  // Scanner captures the pre-':' part into `value` and the rest into `name`
  // per GetResult's (remaining, captured) output order.
  if (strings::Scanner(header)
          .ScanEscapedUntil(':')
          .StopCapture()
          .OneLiteral(": ")
          .GetResult(&value, &name)) {
    string str_value(value);
    // Drop the trailing CR/LF that terminates each header line.
    absl::StripTrailingAsciiWhitespace(&str_value);
    that->response_headers_[string(name)] = str_value;
  }
  return size * nmemb;
}
// Performs the configured request synchronously and maps both transport
// failures (CURLcode) and HTTP status codes onto a tsl::Status.  On any
// non-OK result the response buffer is cleared.  A request object can only
// be sent once.
Status CurlHttpRequest::Send() {
  CheckNotSent();
  CHECK(is_uri_set_) << "URI has not been set.";
  is_sent_ = true;
  if (curl_headers_) {
    CHECK_CURL_OK(
        libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTPHEADER, curl_headers_));
  }
  if (resolve_list_) {
    CHECK_CURL_OK(
        libcurl_->curl_easy_setopt(curl_, CURLOPT_RESOLVE, resolve_list_));
  }
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERDATA,
                                           reinterpret_cast<void*>(this)));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERFUNCTION,
                                           &CurlHttpRequest::HeaderCallback));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_TIMEOUT,
                                           request_timeout_secs_));
  CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_CONNECTTIMEOUT,
                                           connect_timeout_secs_));
  // libcurl writes a human-readable description of transport errors here.
  char error_buffer[CURL_ERROR_SIZE] = {0};
  CHECK_CURL_OK(
      libcurl_->curl_easy_setopt(curl_, CURLOPT_ERRORBUFFER, error_buffer));
  if (stats_ != nullptr) {
    stats_->RecordRequest(this, uri_, method_);
  }
  const CURLcode curl_result = libcurl_->curl_easy_perform(curl_);
  TF_RETURN_IF_ERROR(CURLcodeToStatus(curl_result, error_buffer));
  double written_size = 0;
  CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_SIZE_DOWNLOAD,
                                            &written_size));
  CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_RESPONSE_CODE,
                                            &response_code_));
  // Builds an error message including a truncated copy of the body.
  auto get_error_message = [this]() -> string {
    string error_message = strings::StrCat(
        "Error executing an HTTP request: HTTP response code ", response_code_);
    StringPiece body = GetResponse();
    if (!body.empty()) {
      return strings::StrCat(
          error_message, " with body '",
          body.substr(0, std::min(body.size(), response_to_error_limit_)), "'");
    }
    return error_message;
  };
  Status result;
  switch (response_code_) {
    // Success codes.
    case 200:
    case 201:
    case 204:
    case 206:
      result = OkStatus();
      break;
    case 416:
      // Requested Range Not Satisfiable: treated as a successful empty read.
      response_buffer_->clear();
      if (IsDirectResponse()) {
        direct_response_.bytes_transferred_ = 0;
      }
      result = OkStatus();
      break;
    case 400:
    case 406:
    case 411:
    case 414:
      result = errors::InvalidArgument(get_error_message());
      break;
    case 401:
    case 403:
    case 407:
      result = errors::PermissionDenied(get_error_message());
      break;
    case 404:
    case 410:
      result = errors::NotFound(get_error_message());
      break;
    case 302:
    case 303:
    case 304:
    case 307:
    case 412:
    case 413:
      result = errors::FailedPrecondition(get_error_message());
      break;
    // Server-side and unrecognized codes are treated as retryable.
    case 308:
    case 409:
    case 429:
    case 500:
    case 502:
    case 503:
    default:
      result = errors::Unavailable(get_error_message());
      break;
  }
  if (!result.ok()) {
    response_buffer_->clear();
  }
  if (stats_ != nullptr) {
    stats_->RecordResponse(this, uri_, method_, result);
  }
  return result;
}
// Crashes (CHECK-fails) if an HTTP method was already chosen for this
// request; each request object supports exactly one method.
void CurlHttpRequest::CheckMethodNotSet() const {
  CHECK(!is_method_set_) << "HTTP method has been already set.";
}
// Crashes (CHECK-fails) if Send() was already called; request objects are
// single-use and must not be reconfigured or re-sent afterwards.
void CurlHttpRequest::CheckNotSent() const {
  CHECK(!is_sent_) << "The request has already been sent.";
}
// Returns a non-owning view of the response body: the caller-supplied direct
// buffer when direct mode is active, otherwise the internal response vector.
StringPiece CurlHttpRequest::GetResponse() const {
  if (IsDirectResponse()) {
    return StringPiece(direct_response_.buffer_,
                       direct_response_.bytes_transferred_);
  }
  return StringPiece(response_buffer_->data(), response_buffer_->size());
}
// Looks up a response header captured by HeaderCallback; returns the empty
// string when the header was not present in the response.
string CurlHttpRequest::GetResponseHeader(const string& name) const {
  const auto it = response_headers_.find(name);
  if (it == response_headers_.end()) {
    return "";
  }
  return it->second;
}
// Returns the HTTP status code recorded by the last Send().
uint64 CurlHttpRequest::GetResponseCode() const {
  return response_code_;
}
// libcurl CURLOPT_XFERINFOFUNCTION hook. Returns 0 to let the transfer
// continue, or 1 to abort it (libcurl then fails the transfer with
// CURLE_ABORTED_BY_CALLBACK) when no forward progress has been observed for
// more than inactivity_timeout_secs_ seconds.
int CurlHttpRequest::ProgressCallback(void* this_object, curl_off_t dltotal,
                                      curl_off_t dlnow, curl_off_t ultotal,
                                      curl_off_t ulnow) {
  auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
  const auto now = that->env_->NowSeconds();
  const auto current_progress = dlnow + ulnow;
  if (that->last_progress_timestamp_ == 0 ||
      current_progress > that->last_progress_bytes_) {
    // First callback, or bytes have moved since last time: reset the stall
    // clock and remember the new high-water mark.
    that->last_progress_timestamp_ = now;
    that->last_progress_bytes_ = current_progress;
    return 0;
  }
  if (now - that->last_progress_timestamp_ > that->inactivity_timeout_secs_) {
    // Stalled for too long; collect per-phase CURL timings to make the
    // abort log actionable (each getinfo may itself fail, hence the paired
    // status values).
    double lookup_time = -1;
    const auto lookup_time_status = that->libcurl_->curl_easy_getinfo(
        that->curl_, CURLINFO_NAMELOOKUP_TIME, &lookup_time);
    double connect_time = -1;
    const auto connect_time_status = that->libcurl_->curl_easy_getinfo(
        that->curl_, CURLINFO_CONNECT_TIME, &connect_time);
    double pretransfer_time = -1;
    const auto pretransfer_time_status = that->libcurl_->curl_easy_getinfo(
        that->curl_, CURLINFO_PRETRANSFER_TIME, &pretransfer_time);
    double starttransfer_time = -1;
    const auto starttransfer_time_status = that->libcurl_->curl_easy_getinfo(
        that->curl_, CURLINFO_STARTTRANSFER_TIME, &starttransfer_time);
    LOG(ERROR) << "The transmission of request " << this_object
               << " (URI: " << that->uri_ << ") has been stuck at "
               << current_progress << " of " << dltotal + ultotal
               << " bytes for " << now - that->last_progress_timestamp_
               << " seconds and will be aborted. CURL timing information: "
               << "lookup time: " << lookup_time << " ("
               << curl_easy_strerror(lookup_time_status)
               << "), connect time: " << connect_time << " ("
               << curl_easy_strerror(connect_time_status)
               << "), pre-transfer time: " << pretransfer_time << " ("
               << curl_easy_strerror(pretransfer_time_status)
               << "), start-transfer time: " << starttransfer_time << " ("
               << curl_easy_strerror(starttransfer_time_status) << ")";
    return 1;  // Abort the transfer.
  }
  // No new bytes, but still within the inactivity window.
  return 0;
}
// Maps a libcurl result code (plus its CURLOPT_ERRORBUFFER text) to a
// Status. A write error caused by overflowing a direct-response buffer is
// reported as FailedPrecondition — unless the server answered 416, in which
// case the transfer is treated as a successful empty read.
Status CurlHttpRequest::CURLcodeToStatus(CURLcode code,
                                         const char* error_buffer) {
  if (code == CURLE_OK) {
    return OkStatus();
  }
  string error_message = strings::StrCat(
      "Error executing an HTTP request: libcurl code ", code, " meaning '",
      curl_easy_strerror(code), "', error details: ");
  const bool direct_buffer_overflow =
      code == CURLE_WRITE_ERROR && IsDirectResponse() &&
      direct_response_.bytes_received_ > direct_response_.buffer_size_;
  if (direct_buffer_overflow) {
    // 416 (Range Not Satisfiable) means the object is smaller than the
    // requested range; callers treat this as success with zero bytes.
    uint64 response_code = 0;
    const CURLcode get_response_result = libcurl_->curl_easy_getinfo(
        curl_, CURLINFO_RESPONSE_CODE, &response_code);
    if (get_response_result == CURLE_OK && response_code == 416) {
      return OkStatus();
    }
    return errors::FailedPrecondition(strings::StrCat(
        error_message, "Received ", direct_response_.bytes_received_,
        " response bytes ", "for a ", direct_response_.buffer_size_,
        "-byte buffer"));
  }
  if (code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_SSL_CACERT_BADFILE) {
    // These indicate misconfiguration (DNS / CA bundle) rather than a
    // transient failure, so they are not retryable.
    return errors::FailedPrecondition(
        strings::StrCat(error_message, error_buffer));
  }
  return errors::Unavailable(
      strings::StrCat(error_message, *error_buffer ? error_buffer : "(none)"));
}
} | #include "tsl/platform/cloud/curl_http_request.h"
#include <fstream>
#include <string>
#include "absl/status/status.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
// Canned scratch content used to verify that result buffers get overwritten.
const string kTestContent = "random original scratch content";
// Env wrapper with a manually advanced clock, letting tests drive the
// inactivity-timeout logic in CurlHttpRequest::ProgressCallback.
class FakeEnv : public EnvWrapper {
 public:
  FakeEnv() : EnvWrapper(Env::Default()) {}
  uint64 NowSeconds() const override { return now_; }
  // Current fake time in seconds; tests (and FakeLibCurl) mutate it directly.
  uint64 now_ = 10000;
};
// In-memory LibCurl fake. The fake object itself stands in for the CURL
// handle; curl_easy_setopt calls are recorded into public members, and
// curl_easy_perform replays the canned response body/headers/progress ticks
// through the callbacks registered by the code under test.
class FakeLibCurl : public LibCurl {
 public:
  FakeLibCurl(const string& response_content, uint64 response_code)
      : response_content_(response_content), response_code_(response_code) {}
  // `progress_ticks` is a sequence of (fake time, transferred bytes) pairs
  // replayed through the progress callback during curl_easy_perform.
  FakeLibCurl(const string& response_content, uint64 response_code,
              std::vector<std::tuple<uint64, curl_off_t>> progress_ticks,
              FakeEnv* env)
      : response_content_(response_content),
        response_code_(response_code),
        progress_ticks_(std::move(progress_ticks)),
        env_(env) {}
  FakeLibCurl(const string& response_content, uint64 response_code,
              const std::vector<string>& response_headers)
      : response_content_(response_content),
        response_code_(response_code),
        response_headers_(response_headers) {}
  CURL* curl_easy_init() override {
    is_initialized_ = true;
    // The fake doubles as the CURL handle.
    return reinterpret_cast<CURL*>(this);
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            uint64 param) override {
    switch (option) {
      case CURLOPT_POST:
        is_post_ = param;
        break;
      case CURLOPT_PUT:
        is_put_ = param;
        break;
      default:
        break;
    }
    return CURLE_OK;
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            const char* param) override {
    // C-string options are handled by the void* overload.
    return curl_easy_setopt(curl, option,
                            reinterpret_cast<void*>(const_cast<char*>(param)));
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            void* param) override {
    switch (option) {
      case CURLOPT_URL:
        url_ = reinterpret_cast<char*>(param);
        break;
      case CURLOPT_RANGE:
        range_ = reinterpret_cast<char*>(param);
        break;
      case CURLOPT_CUSTOMREQUEST:
        custom_request_ = reinterpret_cast<char*>(param);
        break;
      case CURLOPT_HTTPHEADER:
        headers_ = reinterpret_cast<std::vector<string>*>(param);
        break;
      case CURLOPT_ERRORBUFFER:
        error_buffer_ = reinterpret_cast<char*>(param);
        break;
      case CURLOPT_CAINFO:
        ca_info_ = reinterpret_cast<char*>(param);
        break;
      case CURLOPT_WRITEDATA:
        write_data_ = reinterpret_cast<FILE*>(param);
        break;
      case CURLOPT_HEADERDATA:
        header_data_ = reinterpret_cast<FILE*>(param);
        break;
      case CURLOPT_READDATA:
        read_data_ = reinterpret_cast<FILE*>(param);
        break;
      case CURLOPT_XFERINFODATA:
        progress_data_ = param;
        break;
      default:
        break;
    }
    return CURLE_OK;
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            size_t (*param)(void*, size_t, size_t,
                                            FILE*)) override {
    read_callback_ = param;
    return CURLE_OK;
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            size_t (*param)(const void*, size_t, size_t,
                                            void*)) override {
    switch (option) {
      case CURLOPT_WRITEFUNCTION:
        write_callback_ = param;
        break;
      case CURLOPT_HEADERFUNCTION:
        header_callback_ = param;
        break;
      default:
        break;
    }
    return CURLE_OK;
  }
  CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
                            int (*param)(void* clientp, curl_off_t dltotal,
                                         curl_off_t dlnow, curl_off_t ultotal,
                                         curl_off_t ulnow)) override {
    progress_callback_ = param;
    return CURLE_OK;
  }
  // Simulates the whole transfer: drains the read callback for uploads into
  // posted_content_, pushes the canned body/headers through the
  // write/header callbacks, copies the canned error message, and replays
  // progress ticks (advancing the fake clock) until one requests an abort.
  CURLcode curl_easy_perform(CURL* curl) override {
    if (is_post_ || is_put_) {
      // Tiny buffer on purpose, to exercise multi-chunk reads.
      char buffer[3];
      int bytes_read;
      posted_content_ = "";
      do {
        bytes_read = read_callback_(buffer, 1, sizeof(buffer), read_data_);
        posted_content_ =
            strings::StrCat(posted_content_, StringPiece(buffer, bytes_read));
      } while (bytes_read > 0);
    }
    if (write_data_ || write_callback_) {
      size_t bytes_handled = write_callback_(
          response_content_.c_str(), 1, response_content_.size(), write_data_);
      // A short write means the sink rejected data, mirroring real curl.
      if (bytes_handled != response_content_.size()) {
        curl_easy_perform_result_ = CURLE_WRITE_ERROR;
      }
    }
    for (const auto& header : response_headers_) {
      header_callback_(header.c_str(), 1, header.size(), header_data_);
    }
    if (error_buffer_) {
      strncpy(error_buffer_, curl_easy_perform_error_message_.c_str(),
              curl_easy_perform_error_message_.size() + 1);
    }
    for (const auto& tick : progress_ticks_) {
      env_->now_ = std::get<0>(tick);
      if (progress_callback_(progress_data_, 0, std::get<1>(tick), 0, 0)) {
        return CURLE_ABORTED_BY_CALLBACK;
      }
    }
    return curl_easy_perform_result_;
  }
  CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
                             uint64* value) override {
    switch (info) {
      case CURLINFO_RESPONSE_CODE:
        *value = response_code_;
        break;
      default:
        break;
    }
    return CURLE_OK;
  }
  CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
                             double* value) override {
    switch (info) {
      case CURLINFO_SIZE_DOWNLOAD:
        *value = response_content_.size();
        break;
      default:
        break;
    }
    return CURLE_OK;
  }
  void curl_easy_cleanup(CURL* curl) override { is_cleaned_up_ = true; }
  // Header lists are modeled as plain std::vector<string>.
  curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
    std::vector<string>* v = list ? reinterpret_cast<std::vector<string>*>(list)
                                  : new std::vector<string>();
    v->push_back(str);
    return reinterpret_cast<curl_slist*>(v);
  }
  // Only escapes '/' -> "%2F", which is all the tests rely on.
  char* curl_easy_escape(CURL* curl, const char* str, int length) override {
    const string victim = "/";
    const string encoded = "%2F";
    string temp_str = str;
    std::string::size_type n = 0;
    while ((n = temp_str.find(victim, n)) != std::string::npos) {
      temp_str.replace(n, victim.size(), encoded);
      n += encoded.size();
    }
    char* out_char_str = reinterpret_cast<char*>(
        port::Malloc(sizeof(char) * temp_str.size() + 1));
    std::copy(temp_str.begin(), temp_str.end(), out_char_str);
    out_char_str[temp_str.size()] = '\0';
    return out_char_str;
  }
  void curl_slist_free_all(curl_slist* list) override {
    delete reinterpret_cast<std::vector<string>*>(list);
  }
  void curl_free(void* p) override { port::Free(p); }
  // Canned response plus everything observed from the request under test.
  string response_content_;
  uint64 response_code_;
  std::vector<string> response_headers_;
  string url_;
  string range_;
  string custom_request_;
  string ca_info_;
  char* error_buffer_ = nullptr;
  bool is_initialized_ = false;
  bool is_cleaned_up_ = false;
  std::vector<string>* headers_ = nullptr;
  bool is_post_ = false;
  bool is_put_ = false;
  void* write_data_ = nullptr;
  size_t (*write_callback_)(const void* ptr, size_t size, size_t nmemb,
                            void* userdata) = nullptr;
  void* header_data_ = nullptr;
  size_t (*header_callback_)(const void* ptr, size_t size, size_t nmemb,
                             void* userdata) = nullptr;
  FILE* read_data_ = nullptr;
  size_t (*read_callback_)(void* ptr, size_t size, size_t nmemb,
                           FILE* userdata) = &fread;
  int (*progress_callback_)(void* clientp, curl_off_t dltotal, curl_off_t dlnow,
                            curl_off_t ultotal, curl_off_t ulnow) = nullptr;
  void* progress_data_ = nullptr;
  // Upload body reassembled from the read callback during perform.
  string posted_content_;
  CURLcode curl_easy_perform_result_ = CURLE_OK;
  string curl_easy_perform_error_message_;
  // (fake time seconds, transferred bytes) pairs for the progress callback.
  std::vector<std::tuple<uint64, curl_off_t>> progress_ticks_;
  FakeEnv* env_ = nullptr;
};
// Verifies full option wiring of a ranged GET into a caller-supplied vector.
// NOTE(review): string literals such as `"http:` below are visibly truncated
// (unbalanced quotes) — URL text was lost in extraction; the originals were
// complete quoted URLs. Left as-is.
TEST(CurlHttpRequestTest, GetRequest) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  // The scratch vector is overwritten, not appended to.
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// Same as GetRequest but the response lands in a caller-owned direct buffer.
TEST(CurlHttpRequestTest, GetRequest_Direct) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch(100, 0);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBufferDirect(scratch.data(), scratch.capacity());
  TF_EXPECT_OK(http_request.Send());
  string expected_response = "get response";
  size_t response_bytes_transferred =
      http_request.GetResultBufferDirectBytesTransferred();
  EXPECT_EQ(expected_response.size(), response_bytes_transferred);
  EXPECT_EQ(
      "get response",
      string(scratch.begin(), scratch.begin() + response_bytes_transferred));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// The CURL_CA_BUNDLE env var must be forwarded to CURLOPT_CAINFO.
TEST(CurlHttpRequestTest, GetRequest_CustomCaInfoFlag) {
  static char set_var[] = "CURL_CA_BUNDLE=test";
  putenv(set_var);
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("test", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// A response larger than the direct buffer must fail with
// FailedPrecondition while still filling the buffer with the prefix.
TEST(CurlHttpRequestTest, GetRequest_Direct_ResponseTooLarge) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch(5, 0);
  http_request.SetUri("http:
  http_request.SetResultBufferDirect(scratch.data(), scratch.size());
  const Status& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 23 meaning "
      "'Failed writing received data to disk/application', error details: "
      "Received 12 response bytes for a 5-byte buffer",
      status.message());
  EXPECT_EQ(5, http_request.GetResultBufferDirectBytesTransferred());
  EXPECT_EQ("get r", string(scratch.begin(), scratch.begin() + 5));
}
// HTTP 416 in direct mode is success with zero bytes transferred; the
// buffer's previous contents are whatever the fake already wrote.
TEST(CurlHttpRequestTest, GetRequest_Direct_RangeOutOfBound) {
  FakeLibCurl libcurl("get response", 416);
  CurlHttpRequest http_request(&libcurl);
  const string initialScratch = "abcde";
  std::vector<char> scratch;
  scratch.insert(scratch.end(), initialScratch.begin(), initialScratch.end());
  http_request.SetUri("http:
  http_request.SetRange(0, 4);
  http_request.SetResultBufferDirect(scratch.data(), scratch.size());
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ(416, http_request.GetResponseCode());
  EXPECT_EQ(0, http_request.GetResultBufferDirectBytesTransferred());
  EXPECT_EQ("get r", string(scratch.begin(), scratch.end()));
}
// An empty body with a 200 must succeed and leave the buffer empty.
TEST(CurlHttpRequestTest, GetRequest_Empty) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.resize(0);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(scratch.empty());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// HTTP 416 in buffered mode is success; the response buffer must be cleared.
TEST(CurlHttpRequestTest, GetRequest_RangeOutOfBound) {
  FakeLibCurl libcurl("get response", 416);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(scratch.empty());
  EXPECT_EQ(416, http_request.GetResponseCode());
}
// 503 maps to UNAVAILABLE and the (clipped) body is quoted in the message.
TEST(CurlHttpRequestTest, GetRequest_503) {
  FakeLibCurl libcurl("get response", 503);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  http_request.SetResultBuffer(&scratch);
  const auto& status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: HTTP response code 503 with body "
      "'get response'",
      status.message());
}
// A libcurl timeout (no HTTP code) maps to UNAVAILABLE with curl details.
TEST(CurlHttpRequestTest, GetRequest_HttpCode0) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_OPERATION_TIMEDOUT;
  libcurl.curl_easy_perform_error_message_ = "Operation timed out";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 28 meaning "
      "'Timeout was reached', error details: Operation timed out",
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// DNS failure is configuration-level: FAILED_PRECONDITION, not retryable.
TEST(CurlHttpRequestTest, GetRequest_CouldntResolveHost) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_COULDNT_RESOLVE_HOST;
  libcurl.curl_easy_perform_error_message_ =
      "Could not resolve host 'metadata'";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 6 meaning "
      "'Couldn't resolve host name', error details: Could not resolve host "
      "'metadata'",
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// A bad CA bundle is likewise FAILED_PRECONDITION.
TEST(CurlHttpRequestTest, GetRequest_SslBadCertfile) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_SSL_CACERT_BADFILE;
  libcurl.curl_easy_perform_error_message_ =
      "error setting certificate verify locations:";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 77 meaning "
      "'Problem with the SSL CA cert (path? access rights?)', error details: "
      "error setting certificate verify locations:",
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// Parseable headers are captured; malformed ones are silently skipped.
TEST(CurlHttpRequestTest, ResponseHeaders) {
  FakeLibCurl libcurl(
      "get response", 200,
      {"Location: abcd", "Content-Type: text", "unparsable header"});
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("abcd", http_request.GetResponseHeader("Location"));
  EXPECT_EQ("text", http_request.GetResponseHeader("Content-Type"));
  EXPECT_EQ("", http_request.GetResponseHeader("Not-Seen-Header"));
}
// PUT body streamed from a file; Content-Length must match the file size.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 0));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
  std::remove(content_filename.c_str());
}
// A non-zero file offset skips the leading bytes of the upload.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile_NonZeroOffset) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 7));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("dy content", libcurl.posted_content_);
  std::remove(content_filename.c_str());
}
// Empty PUT must send Content-Length: 0 and identity transfer encoding.
TEST(CurlHttpRequestTest, PutRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPutEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("", libcurl.posted_content_);
}
// POST body supplied from an in-memory buffer.
TEST(CurlHttpRequestTest, PostRequest_WithBody_FromMemory) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  string content = "post body content";
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostFromBuffer(content.c_str(), content.size());
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
}
// Empty POST mirrors the empty-PUT header contract.
TEST(CurlHttpRequestTest, PostRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("", libcurl.posted_content_);
}
// DELETE goes out via CURLOPT_CUSTOMREQUEST.
TEST(CurlHttpRequestTest, DeleteRequest) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetDeleteRequest();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("DELETE", libcurl.custom_request_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
}
// The misuse tests below pin the exact CHECK-failure messages produced by
// CheckNotSent()/CheckMethodNotSet()/Send().
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_NoUri) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  ASSERT_DEATH((void)http_request.Send(), "URI has not been set");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_TwoSends) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  ASSERT_DEATH((void)http_request.Send(), "The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_ReusingAfterSend) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  ASSERT_DEATH(http_request.SetUri("http:
               "The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_SettingMethodTwice) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetDeleteRequest();
  ASSERT_DEATH(http_request.SetPostEmptyBody(),
               "HTTP method has been already set");
}
// EscapeString delegates to curl_easy_escape (the fake encodes '/' only).
TEST(CurlHttpRequestTest, EscapeString) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  const string test_string = "a/b/c";
  EXPECT_EQ("a%2Fb%2Fc", http_request.EscapeString(test_string));
}
// On an HTTP error, Send() must clear the result buffer entirely.
TEST(CurlHttpRequestTest, ErrorReturnsNoResponse) {
  FakeLibCurl libcurl("get response", 500);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  EXPECT_EQ(error::UNAVAILABLE, http_request.Send().code());
  EXPECT_EQ("", string(scratch.begin(), scratch.end()));
}
// Ticks: (fake time seconds, bytes transferred). Progress resumes before
// the inactivity timeout fires, so the transfer completes.
TEST(CurlHttpRequestTest, ProgressIsOk) {
  FakeEnv env;
  FakeLibCurl libcurl(
      "test", 200,
      {
          std::make_tuple(100, 0) ,
          std::make_tuple(110, 0) ,
          std::make_tuple(200, 100)
      },
      &env);
  CurlHttpRequest http_request(&libcurl, &env);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
}
// No byte movement across the ticks -> the progress callback aborts and
// curl reports CURLE_ABORTED_BY_CALLBACK, surfaced as UNAVAILABLE.
TEST(CurlHttpRequestTest, ProgressIsStuck) {
  FakeEnv env;
  FakeLibCurl libcurl(
      "test", 200,
      {
          std::make_tuple(100, 10) ,
          std::make_tuple(130, 10) ,
          std::make_tuple(170, 10)
      },
      &env);
  CurlHttpRequest http_request(&libcurl, &env);
  http_request.SetUri("http:
  auto status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 42 meaning 'Operation "
      "was aborted by an application callback', error details: (none)",
      status.message());
}
// Records the RecordRequest/RecordResponse calls made by CurlHttpRequest so
// tests can assert on their arguments and relative ordering.
class TestStats : public HttpRequest::RequestStats {
 public:
  ~TestStats() override = default;
  void RecordRequest(const HttpRequest* request, const string& uri,
                     HttpRequest::RequestMethod method) override {
    has_recorded_request_ = true;
    record_request_request_ = request;
    record_request_uri_ = uri;
    record_request_method_ = method;
  }
  void RecordResponse(const HttpRequest* request, const string& uri,
                      HttpRequest::RequestMethod method,
                      const Status& result) override {
    has_recorded_response_ = true;
    record_response_request_ = request;
    record_response_uri_ = uri;
    record_response_method_ = method;
    record_response_result_ = result;
  }
  // NOTE(review): the string initializers below appear truncated ("http:)
  // by extraction; originals were complete quoted sentinel URLs.
  const HttpRequest* record_request_request_ = nullptr;
  string record_request_uri_ = "http:
  HttpRequest::RequestMethod record_request_method_ =
      HttpRequest::RequestMethod::kGet;
  const HttpRequest* record_response_request_ = nullptr;
  string record_response_uri_ = "http:
  HttpRequest::RequestMethod record_response_method_ =
      HttpRequest::RequestMethod::kGet;
  Status record_response_result_;
  bool has_recorded_request_ = false;
  bool has_recorded_response_ = false;
};
// FakeLibCurl variant that snapshots the stats flags at perform() time, to
// prove RecordRequest happens before the transfer and RecordResponse after.
class StatsTestFakeLibCurl : public FakeLibCurl {
 public:
  StatsTestFakeLibCurl(TestStats* stats, const string& response_content,
                       uint64 response_code)
      : FakeLibCurl(response_content, response_code), stats_(stats) {}
  CURLcode curl_easy_perform(CURL* curl) override {
    CHECK(!performed_request_);
    performed_request_ = true;
    stats_had_recorded_request_ = stats_->has_recorded_request_;
    stats_had_recorded_response_ = stats_->has_recorded_response_;
    return FakeLibCurl::curl_easy_perform(curl);
  };
  TestStats* stats_;
  bool performed_request_ = false;
  bool stats_had_recorded_request_;
  bool stats_had_recorded_response_;
};
// Successful GET: request recorded before perform, response after, with OK.
TEST(CurlHttpRequestTest, StatsGetSuccessful) {
  TestStats stats;
  StatsTestFakeLibCurl libcurl(&stats, "get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
  TF_EXPECT_OK(stats.record_response_result_);
  EXPECT_TRUE(libcurl.performed_request_);
  EXPECT_TRUE(libcurl.stats_had_recorded_request_);
  EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
// 404: response is still recorded, carrying the NotFound status Send returns.
TEST(CurlHttpRequestTest, StatsGetNotFound) {
  TestStats stats;
  StatsTestFakeLibCurl libcurl(&stats, "get other response", 404);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  Status s = http_request.Send();
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
  EXPECT_TRUE(absl::IsNotFound(stats.record_response_result_));
  EXPECT_EQ(s, stats.record_response_result_);
  EXPECT_TRUE(libcurl.performed_request_);
  EXPECT_TRUE(libcurl.stats_had_recorded_request_);
  EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
TEST(CurlHttpRequestTest, StatsPost) {
TestStats stats;
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetRequestStats(&stats);
string content = "post body content";
http_request.SetUri("http:
http_request.SetPostFromBuffer(content.c_str(), content.size());
TF_EXPECT_OK(http_request.Send());
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpR | void CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) {
CheckNotSent();
if (!auth_token.empty()) {
AddHeader("Authorization", strings::StrCat("Bearer ", auth_token));
}
} | TEST(CurlHttpRequestTest, GetRequest) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ("", libcurl.ca_info_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_Direct) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch(100, 0);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBufferDirect(scratch.data(), scratch.capacity());
TF_EXPECT_OK(http_request.Send());
string expected_response = "get response";
size_t response_bytes_transferred =
http_request.GetResultBufferDirectBytesTransferred();
EXPECT_EQ(expected_response.size(), response_bytes_transferred);
EXPECT_EQ(
"get response",
string(scratch.begin(), scratch.begin() + response_bytes_transferred));
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ("", libcurl.ca_info_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
// The CURL_CA_BUNDLE environment variable should be forwarded to libcurl as
// the CA-info path. putenv requires storage with static lifetime, hence the
// static char array.
TEST(CurlHttpRequestTest, GetRequest_CustomCaInfoFlag) {
  static char set_var[] = "CURL_CA_BUNDLE=test";
  putenv(set_var);
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  // The env var value must have been picked up.
  EXPECT_EQ("test", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// PUT with the request body streamed from a file (offset 0): full file
// content is posted and Content-Length reflects the file size.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 0));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
  // Clean up the temp file so repeated runs start fresh.
  std::remove(content_filename.c_str());
}
// PUT from a file starting at byte offset 7: only the suffix of the file
// ("dy content" out of "post body content") is posted.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile_NonZeroOffset) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 7));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("dy content", libcurl.posted_content_);
  std::remove(content_filename.c_str());
}
// Empty-body PUT: expects explicit Content-Length: 0 plus
// Transfer-Encoding: identity so servers do not wait for a chunked body.
TEST(CurlHttpRequestTest, PutRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPutEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("", libcurl.posted_content_);
}
// POST with the body supplied from an in-memory buffer; Content-Length must
// match the buffer size and the exact bytes must be posted.
TEST(CurlHttpRequestTest, PostRequest_WithBody_FromMemory) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  string content = "post body content";
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostFromBuffer(content.c_str(), content.size());
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
}
// Empty-body POST: mirrors PutRequest_WithoutBody — Content-Length: 0 and
// Transfer-Encoding: identity are expected.
TEST(CurlHttpRequestTest, PostRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("", libcurl.posted_content_);
} |
#include "tensorflow/core/util/memmapped_file_system.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/memmapped_file_system.pb.h"
namespace tensorflow {
namespace {
// Decodes 8 bytes stored in little-endian order into a uint64.
// Assembling byte-by-byte keeps the result independent of host endianness:
// buffer[0] is always treated as the least significant byte.
uint64 DecodeUint64LittleEndian(const uint8* buffer) {
  uint64 decoded = 0;
  for (int byte = static_cast<int>(sizeof(uint64)) - 1; byte >= 0; --byte) {
    decoded = (decoded << 8) | static_cast<uint64>(buffer[byte]);
  }
  return decoded;
}
}
namespace {
// Non-owning view over a sub-range of the memmapped package. The underlying
// mapping is owned by MemmappedFileSystem and must outlive this region.
class ReadOnlyMemoryRegionFromMemmapped : public ReadOnlyMemoryRegion {
 public:
  ReadOnlyMemoryRegionFromMemmapped(const void* data, uint64 length)
      : data_(data), length_(length) {}
  ~ReadOnlyMemoryRegionFromMemmapped() override = default;
  const void* data() override { return data_; }
  uint64 length() override { return length_; }

 private:
  const void* const data_;  // start of the region (not owned)
  const uint64 length_;     // region size in bytes
};
// RandomAccessFile backed by a slice of the memmapped package. Reads are
// zero-copy: the result StringPiece points into the mapping itself, so the
// mapping (owned by MemmappedFileSystem) must outlive any returned result.
class RandomAccessFileFromMemmapped : public RandomAccessFile {
 public:
  RandomAccessFileFromMemmapped(const void* data, uint64 length)
      : data_(data), length_(length) {}
  ~RandomAccessFileFromMemmapped() override = default;
  Status Name(StringPiece* result) const override {
    return errors::Unimplemented(
        "RandomAccessFileFromMemmapped does not support Name()");
  }
  // Reads up to `to_read` bytes at `offset`. `scratch` is unused except to
  // give the empty result a valid pointer; data is returned by reference into
  // the mapping. A short read yields kOutOfRange, matching file semantics.
  Status Read(uint64 offset, size_t to_read, StringPiece* result,
              char* scratch) const override {
    if (offset >= length_) {
      *result = StringPiece(scratch, 0);
      return Status(absl::StatusCode::kOutOfRange, "Read after file end");
    }
    const uint64 region_left =
        std::min(length_ - offset, static_cast<uint64>(to_read));
    *result =
        StringPiece(reinterpret_cast<const char*>(data_) + offset, region_left);
    return (region_left == to_read) ? absl::OkStatus()
                                    : Status(absl::StatusCode::kOutOfRange,
                                             "Read less bytes than requested");
  }

 private:
  const void* const data_;  // start of the file's bytes in the mapping
  const uint64 length_;     // file size in bytes
};
}
MemmappedFileSystem::MemmappedFileSystem() = default;
// Returns OkStatus iff `fname` is a region recorded in the package directory.
// Fails with FailedPrecondition until InitializeFromFile has succeeded.
Status MemmappedFileSystem::FileExists(const string& fname,
                                       TransactionToken* token) {
  if (!mapped_memory_) {
    return errors::FailedPrecondition("MemmappedEnv is not initialized");
  }
  const auto dir_element = directory_.find(fname);
  if (dir_element != directory_.end()) {
    return absl::OkStatus();
  }
  return errors::NotFound(fname, " not found");
}
// Opens a region of the package as a RandomAccessFile. The returned file is a
// non-owning view; this filesystem (and its mapping) must outlive it.
Status MemmappedFileSystem::NewRandomAccessFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<RandomAccessFile>* result) {
  if (!mapped_memory_) {
    return errors::FailedPrecondition("MemmappedEnv is not initialized");
  }
  const auto dir_element = directory_.find(filename);
  if (dir_element == directory_.end()) {
    return errors::NotFound("Region ", filename, " is not found");
  }
  *result = std::make_unique<RandomAccessFileFromMemmapped>(
      GetMemoryWithOffset(dir_element->second.offset),
      dir_element->second.length);
  return absl::OkStatus();
}
// Exposes a region of the package as a ReadOnlyMemoryRegion (zero-copy view).
// Same lookup and lifetime rules as NewRandomAccessFile.
Status MemmappedFileSystem::NewReadOnlyMemoryRegionFromFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<ReadOnlyMemoryRegion>* result) {
  if (!mapped_memory_) {
    return errors::FailedPrecondition("MemmappedEnv is not initialized");
  }
  const auto dir_element = directory_.find(filename);
  if (dir_element == directory_.end()) {
    return errors::NotFound("Region ", filename, " is not found");
  }
  *result = std::make_unique<ReadOnlyMemoryRegionFromMemmapped>(
      GetMemoryWithOffset(dir_element->second.offset),
      dir_element->second.length);
  return absl::OkStatus();
}
// Reports the byte length of a packaged region via `size`.
Status MemmappedFileSystem::GetFileSize(const string& filename,
                                        TransactionToken* token, uint64* size) {
  if (!mapped_memory_) {
    return errors::FailedPrecondition("MemmappedEnv is not initialized");
  }
  const auto dir_element = directory_.find(filename);
  if (dir_element == directory_.end()) {
    return errors::NotFound("Region ", filename, " is not found");
  }
  *size = dir_element->second.length;
  return absl::OkStatus();
}
// Fills `stat->length` with the region's size; other FileStatistics fields
// are left untouched. Delegates lookup and error handling to GetFileSize.
Status MemmappedFileSystem::Stat(const string& fname, TransactionToken* token,
                                 FileStatistics* stat) {
  uint64 file_size;
  const Status status = GetFileSize(fname, token, &file_size);
  if (!status.ok()) {
    return status;
  }
  stat->length = file_size;
  return status;
}
// The package format is read-only; all mutation entry points below return
// Unimplemented.
Status MemmappedFileSystem::NewWritableFile(const string& filename,
                                            TransactionToken* token,
                                            std::unique_ptr<WritableFile>* wf) {
  return errors::Unimplemented("memmapped format doesn't support writing");
}
// Read-only format: appending is not supported.
Status MemmappedFileSystem::NewAppendableFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<WritableFile>* result) {
  return errors::Unimplemented("memmapped format doesn't support writing");
}
// Directory listing is not supported for the flat package format.
Status MemmappedFileSystem::GetChildren(const string& filename,
                                        TransactionToken* token,
                                        std::vector<string>* strings) {
  return errors::Unimplemented("memmapped format doesn't support GetChildren");
}
// Glob matching is not supported for the flat package format.
Status MemmappedFileSystem::GetMatchingPaths(const string& pattern,
                                             TransactionToken* token,
                                             std::vector<string>* results) {
  return errors::Unimplemented(
      "memmapped format doesn't support GetMatchingPaths");
}
// Read-only format: deletion is not supported.
Status MemmappedFileSystem::DeleteFile(const string& filename,
                                       TransactionToken* token) {
  return errors::Unimplemented("memmapped format doesn't support DeleteFile");
}
// Read-only format: directory creation is not supported.
Status MemmappedFileSystem::CreateDir(const string& dirname,
                                      TransactionToken* token) {
  return errors::Unimplemented("memmapped format doesn't support CreateDir");
}
// Read-only format: directory deletion is not supported.
Status MemmappedFileSystem::DeleteDir(const string& dirname,
                                      TransactionToken* token) {
  return errors::Unimplemented("memmapped format doesn't support DeleteDir");
}
// Read-only format: renaming is not supported.
Status MemmappedFileSystem::RenameFile(const string& filename_from,
                                       const string& filename_to,
                                       TransactionToken* token) {
  return errors::Unimplemented("memmapped format doesn't support RenameFile");
}
// Translates a package-relative byte offset to a pointer into the mapping.
// Precondition: mapped_memory_ is non-null and offset is within bounds.
const void* MemmappedFileSystem::GetMemoryWithOffset(uint64 offset) const {
  return reinterpret_cast<const uint8*>(mapped_memory_->data()) + offset;
}
constexpr const char MemmappedFileSystem::kMemmappedPackagePrefix[];
constexpr const char MemmappedFileSystem::kMemmappedPackageDefaultGraphDef[];
// Maps `filename` and parses the package layout:
//   [ region bytes... | serialized directory proto | uint64 directory offset ]
// The trailing little-endian uint64 locates the directory proto. Each
// directory entry is validated (iterating in reverse) to ensure offsets are
// strictly decreasing and names are unique; any inconsistency is reported as
// DataLoss. On success the region index in directory_ is rebuilt.
Status MemmappedFileSystem::InitializeFromFile(Env* env,
                                               const string& filename) {
  TF_RETURN_IF_ERROR(
      env->NewReadOnlyMemoryRegionFromFile(filename, &mapped_memory_));
  directory_.clear();
  // The file must at least hold the trailing directory-offset field.
  if (mapped_memory_->length() <= sizeof(uint64)) {
    return errors::DataLoss("Corrupted memmapped model file: ", filename,
                            " Invalid package size");
  }
  const auto memory_start =
      reinterpret_cast<const uint8*>(mapped_memory_->data());
  const uint64 directory_offset = DecodeUint64LittleEndian(
      memory_start + mapped_memory_->length() - sizeof(uint64));
  if (directory_offset > mapped_memory_->length() - sizeof(uint64)) {
    return errors::DataLoss("Corrupted memmapped model file: ", filename,
                            " Invalid directory offset");
  }
  MemmappedFileSystemDirectory proto_directory;
  if (!ParseProtoUnlimited(
          &proto_directory, memory_start + directory_offset,
          mapped_memory_->length() - directory_offset - sizeof(uint64))) {
    return errors::DataLoss("Corrupted memmapped model file: ", filename,
                            " Can't parse its internal directory");
  }
  // Walk elements back-to-front so each element's offset can be checked
  // against the start of the element that follows it in the file.
  uint64 prev_element_offset = directory_offset;
  for (auto element_iter = proto_directory.element().rbegin();
       element_iter != proto_directory.element().rend(); ++element_iter) {
    if (element_iter->offset() >= prev_element_offset) {
      return errors::DataLoss("Corrupted memmapped model file: ", filename,
                              " Invalid offset of internal component");
    }
    if (!directory_
             .insert(std::make_pair(
                 element_iter->name(),
                 FileRegion(element_iter->offset(), element_iter->length())))
             .second) {
      return errors::DataLoss("Corrupted memmapped model file: ", filename,
                              " Duplicate name of internal component ",
                              element_iter->name());
    }
    prev_element_offset = element_iter->offset();
  }
  return absl::OkStatus();
}
// True when `filename` carries the memmapped-package scheme prefix.
bool MemmappedFileSystem::IsMemmappedPackageFilename(const string& filename) {
  return absl::StartsWith(filename, kMemmappedPackagePrefix);
}
namespace {
// A region name may contain ASCII letters, digits, underscore and dot only.
bool IsValidRegionChar(char c) {
  if (c == '_' || c == '.') {
    return true;
  }
  if (c >= '0' && c <= '9') {
    return true;
  }
  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
}
bool MemmappedFileSystem::IsWellFormedMemmappedPackageFilename(
const string& filename) {
if (!IsMemmappedPackageFilename(filename)) {
return false;
}
for (char c :
filename.substr(strlen(kMemmappedPackagePrefix),
filename.length() - strlen(kMemmappedPackagePrefix))) {
if (!IsValidRegionChar(c)) {
return false;
}
}
return true;
}
// Wraps `env`; all operations are delegated until InitializeFromFile installs
// a memmapped package filesystem.
MemmappedEnv::MemmappedEnv(Env* env) : EnvWrapper(env) {}
// Routes package-prefixed filenames to the memmapped filesystem (once
// initialized); everything else falls through to the wrapped Env.
Status MemmappedEnv::GetFileSystemForFile(const string& fname,
                                          FileSystem** result) {
  if (MemmappedFileSystem::IsMemmappedPackageFilename(fname)) {
    if (!memmapped_file_system_) {
      return errors::FailedPrecondition(
          "MemmappedEnv is not initialized from a file.");
    }
    *result = memmapped_file_system_.get();
    return absl::OkStatus();
  }
  return EnvWrapper::GetFileSystemForFile(fname, result);
}
// Adds the memmapped package scheme to the wrapped Env's scheme list.
Status MemmappedEnv::GetRegisteredFileSystemSchemes(
    std::vector<string>* schemes) {
  const auto status = EnvWrapper::GetRegisteredFileSystemSchemes(schemes);
  if (status.ok()) {
    schemes->emplace_back(MemmappedFileSystem::kMemmappedPackagePrefix);
  }
  return status;
}
// Loads `package_filename` into a fresh MemmappedFileSystem and installs it
// only on success, so a failed initialization leaves any previously installed
// filesystem untouched.
Status MemmappedEnv::InitializeFromFile(const string& package_filename) {
  // make_unique instead of raw new: exception-safe and consistent with the
  // rest of this file (see the make_unique uses above).
  auto file_system_ptr = std::make_unique<MemmappedFileSystem>();
  const auto status =
      file_system_ptr->InitializeFromFile(target(), package_filename);
  if (status.ok()) {
    memmapped_file_system_ = std::move(file_system_ptr);
  }
  return status;
}
} | #include "tensorflow/core/util/memmapped_file_system.h"
#include <memory>
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/memmapped_file_system_writer.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tensorflow {
namespace {
constexpr char kTensor1FileName[] = "memmapped_package:
constexpr char kTensor2FileName[] = "memmapped_package:
constexpr char kProtoFileName[] = "memmapped_package:
constexpr int kTestGraphDefVersion = 666;
// Writes a test package holding two tensors and a GraphDef. When `corrupted`
// is true the writer is not flushed/closed, producing a truncated package for
// the corruption test. `test_tensor` is (re)filled and its final contents
// (i^3 pattern) correspond to kTensor2FileName.
Status CreateMemmappedFileSystemFile(const string& filename, bool corrupted,
                                     Tensor* test_tensor) {
  Env* env = Env::Default();
  MemmappedFileSystemWriter writer;
  TF_RETURN_IF_ERROR(writer.InitializeToFile(env, filename));
  test::FillFn<float>(test_tensor,
                      [](int i) { return static_cast<float>(i * i); });
  TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor1FileName));
  GraphDef graph_def;
  graph_def.mutable_versions()->set_producer(kTestGraphDefVersion);
  graph_def.mutable_versions()->set_min_consumer(kTestGraphDefVersion);
  TF_RETURN_IF_ERROR(writer.SaveProtobuf(graph_def, kProtoFileName));
  test::FillFn<float>(test_tensor,
                      [](int i) { return static_cast<float>(i) * i * i; });
  TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor2FileName));
  // Skipping FlushAndClose leaves the package without its directory/footer.
  if (!corrupted) {
    TF_RETURN_IF_ERROR(writer.FlushAndClose());
  }
  return absl::OkStatus();
}
// End-to-end round trip: write a package, initialize MemmappedEnv from it,
// then read back the proto and tensor bytes, sizes, stats, and existence.
TEST(MemmappedFileSystemTest, SimpleTest) {
  const TensorShape test_tensor_shape = {10, 200};
  Tensor test_tensor(DT_FLOAT, test_tensor_shape);
  const string dir = testing::TmpDir();
  const string filename = io::JoinPath(dir, "memmapped_env_test");
  TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, false, &test_tensor));
  MemmappedEnv memmapped_env(Env::Default());
  TF_ASSERT_OK(memmapped_env.InitializeFromFile(filename));
  GraphDef test_graph_def;
  TF_EXPECT_OK(
      ReadBinaryProto(&memmapped_env, kProtoFileName, &test_graph_def));
  EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().producer());
  EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().min_consumer());
  std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
  TF_ASSERT_OK(memmapped_env.NewReadOnlyMemoryRegionFromFile(kTensor2FileName,
                                                             &memory_region));
  // Regions may be padded/aligned, so length can exceed the tensor bytes.
  ASSERT_GE(memory_region->length(), test_tensor.TotalBytes());
  EXPECT_EQ(test_tensor.tensor_data(),
            StringPiece(static_cast<const char*>(memory_region->data()),
                        test_tensor.TotalBytes()));
  uint64 file_size = 0;
  TF_ASSERT_OK(memmapped_env.GetFileSize(kTensor2FileName, &file_size));
  EXPECT_EQ(test_tensor.TotalBytes(), file_size);
  FileStatistics stat;
  TF_ASSERT_OK(memmapped_env.Stat(kTensor2FileName, &stat));
  EXPECT_EQ(test_tensor.TotalBytes(), stat.length);
  EXPECT_EQ(
      error::NOT_FOUND,
      memmapped_env.NewReadOnlyMemoryRegionFromFile("bla-bla", &memory_region)
          .code());
  TF_EXPECT_OK(memmapped_env.FileExists(kTensor2FileName));
  EXPECT_EQ(error::Code::NOT_FOUND,
            memmapped_env.FileExists("bla-bla-bla").code());
}
// Accessing package files before InitializeFromFile must fail with
// FAILED_PRECONDITION rather than crash.
TEST(MemmappedFileSystemTest, NotInitialized) {
  MemmappedEnv memmapped_env(Env::Default());
  std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
  EXPECT_EQ(
      error::FAILED_PRECONDITION,
      memmapped_env
          .NewReadOnlyMemoryRegionFromFile(kTensor1FileName, &memory_region)
          .code());
  std::unique_ptr<RandomAccessFile> file;
  EXPECT_EQ(error::FAILED_PRECONDITION,
            memmapped_env.NewRandomAccessFile(kProtoFileName, &file).code());
}
// A package written without FlushAndClose (no directory footer) must be
// rejected by InitializeFromFile.
TEST(MemmappedFileSystemTest, Corrupted) {
  const TensorShape test_tensor_shape = {100, 200};
  Tensor test_tensor(DT_FLOAT, test_tensor_shape);
  const string dir = testing::TmpDir();
  const string filename = io::JoinPath(dir, "memmapped_env_corrupted_test");
  TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, true, &test_tensor));
  MemmappedFileSystem memmapped_env;
  ASSERT_NE(memmapped_env.InitializeFromFile(Env::Default(), filename),
            absl::OkStatus());
}
// Non-package paths must be transparently proxied to the wrapped default Env:
// write, stat, size and read all go through normal filesystem operations.
TEST(MemmappedFileSystemTest, ProxyToDefault) {
  MemmappedEnv memmapped_env(Env::Default());
  const string dir = testing::TmpDir();
  const string filename = io::JoinPath(dir, "test_file");
  std::unique_ptr<WritableFile> writable_file_temp;
  TF_ASSERT_OK(memmapped_env.NewAppendableFile(filename, &writable_file_temp));
  // Custom deleter: closes/deletes the file object, then removes the file on
  // disk so the test leaves no residue.
  const auto adh = [&memmapped_env, &filename](WritableFile* f) {
    delete f;
    TF_CHECK_OK(memmapped_env.DeleteFile(filename));
  };
  std::unique_ptr<WritableFile, decltype(adh)> writable_file(
      writable_file_temp.release(), adh);
  const string test_string = "bla-bla-bla";
  TF_ASSERT_OK(writable_file->Append(test_string));
  TF_ASSERT_OK(writable_file->Close());
  uint64 file_length = 0;
  TF_EXPECT_OK(memmapped_env.GetFileSize(filename, &file_length));
  EXPECT_EQ(test_string.length(), file_length);
  FileStatistics stat;
  TF_EXPECT_OK(memmapped_env.Stat(filename, &stat));
  EXPECT_EQ(test_string.length(), stat.length);
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_ASSERT_OK(
      memmapped_env.NewRandomAccessFile(filename, &random_access_file));
}
}
// NOTE(review): this is a verbatim duplicate of MemmappedFileSystem::Stat
// defined earlier — likely a concatenation/extraction artifact; verify and
// deduplicate against upstream.
} | Status MemmappedFileSystem::Stat(const string& fname, TransactionToken* token,
                                 FileStatistics* stat) {
  uint64 size;
  auto status = GetFileSize(fname, token, &size);
  if (status.ok()) {
    stat->length = size;
  }
  return status;
}
// NOTE(review): duplicate of MemmappedFileSystemTest.SimpleTest earlier in
// this file — likely a concatenation artifact; verify and deduplicate.
TEST(MemmappedFileSystemTest, SimpleTest) {
  const TensorShape test_tensor_shape = {10, 200};
  Tensor test_tensor(DT_FLOAT, test_tensor_shape);
  const string dir = testing::TmpDir();
  const string filename = io::JoinPath(dir, "memmapped_env_test");
  TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, false, &test_tensor));
  MemmappedEnv memmapped_env(Env::Default());
  TF_ASSERT_OK(memmapped_env.InitializeFromFile(filename));
  GraphDef test_graph_def;
  TF_EXPECT_OK(
      ReadBinaryProto(&memmapped_env, kProtoFileName, &test_graph_def));
  EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().producer());
  EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().min_consumer());
  std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
  TF_ASSERT_OK(memmapped_env.NewReadOnlyMemoryRegionFromFile(kTensor2FileName,
                                                             &memory_region));
  ASSERT_GE(memory_region->length(), test_tensor.TotalBytes());
  EXPECT_EQ(test_tensor.tensor_data(),
            StringPiece(static_cast<const char*>(memory_region->data()),
                        test_tensor.TotalBytes()));
  uint64 file_size = 0;
  TF_ASSERT_OK(memmapped_env.GetFileSize(kTensor2FileName, &file_size));
  EXPECT_EQ(test_tensor.TotalBytes(), file_size);
  FileStatistics stat;
  TF_ASSERT_OK(memmapped_env.Stat(kTensor2FileName, &stat));
  EXPECT_EQ(test_tensor.TotalBytes(), stat.length);
  EXPECT_EQ(
      error::NOT_FOUND,
      memmapped_env.NewReadOnlyMemoryRegionFromFile("bla-bla", &memory_region)
          .code());
  TF_EXPECT_OK(memmapped_env.FileExists(kTensor2FileName));
  EXPECT_EQ(error::Code::NOT_FOUND,
            memmapped_env.FileExists("bla-bla-bla").code());
}
// NOTE(review): duplicate of MemmappedFileSystemTest.ProxyToDefault earlier in
// this file — likely a concatenation artifact; verify and deduplicate.
TEST(MemmappedFileSystemTest, ProxyToDefault) {
  MemmappedEnv memmapped_env(Env::Default());
  const string dir = testing::TmpDir();
  const string filename = io::JoinPath(dir, "test_file");
  std::unique_ptr<WritableFile> writable_file_temp;
  TF_ASSERT_OK(memmapped_env.NewAppendableFile(filename, &writable_file_temp));
  const auto adh = [&memmapped_env, &filename](WritableFile* f) {
    delete f;
    TF_CHECK_OK(memmapped_env.DeleteFile(filename));
  };
  std::unique_ptr<WritableFile, decltype(adh)> writable_file(
      writable_file_temp.release(), adh);
  const string test_string = "bla-bla-bla";
  TF_ASSERT_OK(writable_file->Append(test_string));
  TF_ASSERT_OK(writable_file->Close());
  uint64 file_length = 0;
  TF_EXPECT_OK(memmapped_env.GetFileSize(filename, &file_length));
  EXPECT_EQ(test_string.length(), file_length);
  FileStatistics stat;
  TF_EXPECT_OK(memmapped_env.Stat(filename, &stat));
  EXPECT_EQ(test_string.length(), stat.length);
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_ASSERT_OK(
      memmapped_env.NewRandomAccessFile(filename, &random_access_file));
}
#include "parser/macro_expr_factory.h"
#include <utility>
#include <vector>
#include "absl/functional/overload.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/constant.h"
#include "common/expr.h"
namespace cel {
// Deep-copies `expr`, assigning fresh IDs via CopyId for every node.
// NOTE: the order of CopyId/Copy calls inside each branch is significant —
// it determines the ID numbering of the copied tree; do not reorder.
Expr MacroExprFactory::Copy(const Expr& expr) {
  return absl::visit(
      absl::Overload(
          [this, &expr](const UnspecifiedExpr&) -> Expr {
            return NewUnspecified(CopyId(expr));
          },
          [this, &expr](const Constant& const_expr) -> Expr {
            return NewConst(CopyId(expr), const_expr);
          },
          [this, &expr](const IdentExpr& ident_expr) -> Expr {
            return NewIdent(CopyId(expr), ident_expr.name());
          },
          [this, &expr](const SelectExpr& select_expr) -> Expr {
            // Copy the id before the operand so the parent gets the lower id.
            const auto id = CopyId(expr);
            return select_expr.test_only()
                       ? NewPresenceTest(id, Copy(select_expr.operand()),
                                         select_expr.field())
                       : NewSelect(id, Copy(select_expr.operand()),
                                   select_expr.field());
          },
          [this, &expr](const CallExpr& call_expr) -> Expr {
            const auto id = CopyId(expr);
            // Target (if any) is copied before the arguments.
            absl::optional<Expr> target;
            if (call_expr.has_target()) {
              target = Copy(call_expr.target());
            }
            std::vector<Expr> args;
            args.reserve(call_expr.args().size());
            for (const auto& arg : call_expr.args()) {
              args.push_back(Copy(arg));
            }
            return target.has_value()
                       ? NewMemberCall(id, call_expr.function(),
                                       std::move(*target), std::move(args))
                       : NewCall(id, call_expr.function(), std::move(args));
          },
          [this, &expr](const ListExpr& list_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<ListExprElement> elements;
            elements.reserve(list_expr.elements().size());
            for (const auto& element : list_expr.elements()) {
              elements.push_back(Copy(element));
            }
            return NewList(id, std::move(elements));
          },
          [this, &expr](const StructExpr& struct_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<StructExprField> fields;
            fields.reserve(struct_expr.fields().size());
            for (const auto& field : struct_expr.fields()) {
              fields.push_back(Copy(field));
            }
            return NewStruct(id, struct_expr.name(), std::move(fields));
          },
          [this, &expr](const MapExpr& map_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<MapExprEntry> entries;
            entries.reserve(map_expr.entries().size());
            for (const auto& entry : map_expr.entries()) {
              entries.push_back(Copy(entry));
            }
            return NewMap(id, std::move(entries));
          },
          [this, &expr](const ComprehensionExpr& comprehension_expr) -> Expr {
            const auto id = CopyId(expr);
            // Sub-expressions are copied in declaration order so ids are
            // assigned deterministically.
            auto iter_range = Copy(comprehension_expr.iter_range());
            auto accu_init = Copy(comprehension_expr.accu_init());
            auto loop_condition = Copy(comprehension_expr.loop_condition());
            auto loop_step = Copy(comprehension_expr.loop_step());
            auto result = Copy(comprehension_expr.result());
            return NewComprehension(
                id, comprehension_expr.iter_var(), std::move(iter_range),
                comprehension_expr.accu_var(), std::move(accu_init),
                std::move(loop_condition), std::move(loop_step),
                std::move(result));
          }),
      expr.kind());
}
// Deep-copies a list element, preserving its optional flag.
ListExprElement MacroExprFactory::Copy(const ListExprElement& element) {
  return NewListElement(Copy(element.expr()), element.optional());
}
// Deep-copies a struct field. The id is copied before the value so the field
// receives the lower fresh id; do not reorder.
StructExprField MacroExprFactory::Copy(const StructExprField& field) {
  auto field_id = CopyId(field.id());
  auto field_value = Copy(field.value());
  return NewStructField(field_id, field.name(), std::move(field_value),
                        field.optional());
}
// Deep-copies a map entry. Id, key, then value are copied in that order so
// fresh ids are assigned deterministically; do not reorder.
MapExprEntry MacroExprFactory::Copy(const MapExprEntry& entry) {
  auto entry_id = CopyId(entry.id());
  auto entry_key = Copy(entry.key());
  auto entry_value = Copy(entry.value());
  return NewMapEntry(entry_id, std::move(entry_key), std::move(entry_value),
                     entry.optional());
}
} | #include "parser/macro_expr_factory.h"
#include <cstdint>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/expr.h"
#include "common/expr_factory.h"
#include "internal/testing.h"
namespace cel {
// Test factory with deterministic id assignment: NextId hands out 1, 2, 3...
// and CopyId maps any non-zero source id to a fresh id (0 stays 0). Tests
// below rely on this numbering to predict ids of copied trees.
class TestMacroExprFactory final : public MacroExprFactory {
 public:
  TestMacroExprFactory() : MacroExprFactory() {}
  ExprId id() const { return id_; }
  Expr ReportError(absl::string_view) override {
    return NewUnspecified(NextId());
  }
  Expr ReportErrorAt(const Expr&, absl::string_view) override {
    return NewUnspecified(NextId());
  }
  // Re-export protected factory methods for direct use in tests.
  using MacroExprFactory::NewBoolConst;
  using MacroExprFactory::NewCall;
  using MacroExprFactory::NewComprehension;
  using MacroExprFactory::NewIdent;
  using MacroExprFactory::NewList;
  using MacroExprFactory::NewListElement;
  using MacroExprFactory::NewMap;
  using MacroExprFactory::NewMapEntry;
  using MacroExprFactory::NewMemberCall;
  using MacroExprFactory::NewSelect;
  using MacroExprFactory::NewStruct;
  using MacroExprFactory::NewStructField;
  using MacroExprFactory::NewUnspecified;

 protected:
  ExprId NextId() override { return id_++; }
  ExprId CopyId(ExprId id) override {
    if (id == 0) {
      return 0;
    }
    return NextId();
  }

 private:
  int64_t id_ = 1;  // next id to hand out
};
namespace {
// Copying an unspecified expr (id 1) yields a fresh unspecified expr (id 2).
TEST(MacroExprFactory, CopyUnspecified) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewUnspecified()), factory.NewUnspecified(2));
}
// Copying an ident preserves the name and assigns the next id.
TEST(MacroExprFactory, CopyIdent) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewIdent("foo")), factory.NewIdent(2, "foo"));
}
// Copying a constant preserves the value and assigns the next id.
TEST(MacroExprFactory, CopyConst) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewBoolConst(true)),
            factory.NewBoolConst(2, true));
}
// Select gets id 3 (parent before operand), its operand ident gets id 4.
TEST(MacroExprFactory, CopySelect) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewSelect(factory.NewIdent("foo"), "bar")),
            factory.NewSelect(3, factory.NewIdent(4, "foo"), "bar"));
}
// Member call: call id 4, then target id 5, then argument id 6 — matching the
// id-then-target-then-args copy order in MacroExprFactory::Copy.
TEST(MacroExprFactory, CopyCall) {
  TestMacroExprFactory factory;
  std::vector<Expr> copied_args;
  copied_args.reserve(1);
  copied_args.push_back(factory.NewIdent(6, "baz"));
  EXPECT_EQ(factory.Copy(factory.NewMemberCall("bar", factory.NewIdent("foo"),
                                               factory.NewIdent("baz"))),
            factory.NewMemberCall(4, "bar", factory.NewIdent(5, "foo"),
                                  absl::MakeSpan(copied_args)));
}
// List copy: list id 3, element expr id 4.
TEST(MacroExprFactory, CopyList) {
  TestMacroExprFactory factory;
  std::vector<ListExprElement> copied_elements;
  copied_elements.reserve(1);
  copied_elements.push_back(factory.NewListElement(factory.NewIdent(4, "foo")));
  EXPECT_EQ(factory.Copy(factory.NewList(
                factory.NewListElement(factory.NewIdent("foo")))),
            factory.NewList(3, absl::MakeSpan(copied_elements)));
}
// Struct copy: struct id 4, field id 5, field value id 6.
TEST(MacroExprFactory, CopyStruct) {
  TestMacroExprFactory factory;
  std::vector<StructExprField> copied_fields;
  copied_fields.reserve(1);
  copied_fields.push_back(
      factory.NewStructField(5, "bar", factory.NewIdent(6, "baz")));
  EXPECT_EQ(factory.Copy(factory.NewStruct(
                "foo", factory.NewStructField("bar", factory.NewIdent("baz")))),
            factory.NewStruct(4, "foo", absl::MakeSpan(copied_fields)));
}
// Map copy: map id 5, entry id 6, key id 7, value id 8.
TEST(MacroExprFactory, CopyMap) {
  TestMacroExprFactory factory;
  std::vector<MapExprEntry> copied_entries;
  copied_entries.reserve(1);
  copied_entries.push_back(factory.NewMapEntry(6, factory.NewIdent(7, "bar"),
                                               factory.NewIdent(8, "baz")));
  EXPECT_EQ(factory.Copy(factory.NewMap(factory.NewMapEntry(
                factory.NewIdent("bar"), factory.NewIdent("baz")))),
            factory.NewMap(5, absl::MakeSpan(copied_entries)));
}
// Comprehension copy: id 7, then sub-expressions ids 8..12 in declaration
// order (iter_range, accu_init, loop_condition, loop_step, result).
TEST(MacroExprFactory, CopyComprehension) {
  TestMacroExprFactory factory;
  EXPECT_EQ(
      factory.Copy(factory.NewComprehension(
          "foo", factory.NewList(), "bar", factory.NewBoolConst(true),
          factory.NewIdent("baz"), factory.NewIdent("foo"),
          factory.NewIdent("bar"))),
      factory.NewComprehension(
          7, "foo", factory.NewList(8, std::vector<ListExprElement>()), "bar",
          factory.NewBoolConst(9, true), factory.NewIdent(10, "baz"),
          factory.NewIdent(11, "foo"), factory.NewIdent(12, "bar")));
}
}
// NOTE(review): duplicate of MacroExprFactory::Copy(const MapExprEntry&)
// defined earlier — likely a concatenation artifact; verify and deduplicate.
} | MapExprEntry MacroExprFactory::Copy(const MapExprEntry& entry) {
  auto entry_id = CopyId(entry.id());
  auto entry_key = Copy(entry.key());
  auto entry_value = Copy(entry.value());
  return NewMapEntry(entry_id, std::move(entry_key), std::move(entry_value),
                     entry.optional());
// NOTE(review): duplicate of TEST(MacroExprFactory, CopyMap) earlier in this
// file — likely a concatenation artifact; verify and deduplicate.
} | TEST(MacroExprFactory, CopyMap) {
  TestMacroExprFactory factory;
  std::vector<MapExprEntry> copied_entries;
  copied_entries.reserve(1);
  copied_entries.push_back(factory.NewMapEntry(6, factory.NewIdent(7, "bar"),
                                               factory.NewIdent(8, "baz")));
  EXPECT_EQ(factory.Copy(factory.NewMap(factory.NewMapEntry(
                factory.NewIdent("bar"), factory.NewIdent("baz")))),
            factory.NewMap(5, absl::MakeSpan(copied_entries)));
}
#include "xla/python/ifrt/device.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/hash/hash.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/python/ifrt/device.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char Device::ID = 0;
// Builds a DeviceList. Small lists (<= kInlineDeviceSize) are stored inline;
// larger lists go behind a shared_ptr so copies of the DeviceList share one
// State allocation. hash_ starts as kUnsetHash and is computed lazily.
DeviceList::DeviceList(Devices devices) : hash_(kUnsetHash) {
  if (devices.size() <= kInlineDeviceSize) {
    state_ = State{std::move(devices)};
  } else {
    state_ = std::make_shared<State>(State{std::move(devices)});
  }
}
// Copy constructor. hash_ is std::atomic (not copyable), so the cached value
// is transferred explicitly with relaxed loads/stores — the hash is a pure
// function of the devices, so any cached value remains valid for the copy.
DeviceList::DeviceList(const DeviceList& other)
    : state_(other.state_),
      hash_(other.hash_.load(std::memory_order_relaxed)) {}
// Move constructor; carries over the cached hash (see copy constructor).
DeviceList::DeviceList(DeviceList&& other)
    : state_(std::move(other.state_)),
      hash_(other.hash_.load(std::memory_order_relaxed)) {}
// Copy assignment; no self-assignment guard is needed since both the
// variant/shared_ptr copy and the atomic store are self-assignment safe.
DeviceList& DeviceList::operator=(const DeviceList& other) {
  state_ = other.state_;
  hash_.store(other.hash_.load(std::memory_order_relaxed),
              std::memory_order_relaxed);
  return *this;
}
// Move assignment; transfers state and the cached hash.
// NOTE(review): self-move leaves state_ moved-from while keeping the cached
// hash — standard "valid but unspecified" caveat; callers should not self-move.
DeviceList& DeviceList::operator=(DeviceList&& other) {
  state_ = std::move(other.state_);
  hash_.store(other.hash_.load(std::memory_order_relaxed),
              std::memory_order_relaxed);
  return *this;
}
// Reconstructs a DeviceList from its proto by resolving each device id
// through `lookup_device`; fails if any id cannot be resolved.
absl::StatusOr<DeviceList> DeviceList::FromProto(LookupDeviceFunc lookup_device,
                                                 const DeviceListProto& proto) {
  DeviceList::Devices devices;
  devices.reserve(proto.device_ids_size());
  for (int device_id : proto.device_ids()) {
    TF_ASSIGN_OR_RETURN(Device * device, lookup_device(DeviceId(device_id)));
    devices.push_back(device);
  }
  return DeviceList(std::move(devices));
}
// Serializes the list as the ordered sequence of device ids.
DeviceListProto DeviceList::ToProto() const {
  DeviceListProto proto;
  proto.mutable_device_ids()->Reserve(devices().size());
  for (Device* device : devices()) {
    proto.mutable_device_ids()->AddAlreadyReserved(device->Id().value());
  }
  return proto;
}
// Lazily computes and caches the hash. Relaxed atomics suffice because the
// value is a pure function of the (immutable) device list — concurrent
// callers may race to compute it but all arrive at the same result. A
// computed hash that collides with the kUnsetHash sentinel is bumped by one
// so the sentinel always means "not yet computed".
uint64_t DeviceList::hash() const {
  uint64_t hash = hash_.load(std::memory_order_relaxed);
  if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
    hash = absl::HashOf(devices());
    if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
      ++hash;
    }
    hash_.store(hash, std::memory_order_relaxed);
  }
  return hash;
}
// Human-readable form: "[dev0,dev1,...]" using each device's DebugString.
std::string DeviceList::DebugString() const {
  return absl::StrCat("[",
                      absl::StrJoin(devices(), ",",
                                    [](std::string* out, Device* device) {
                                      absl::StrAppend(out,
                                                      device->DebugString());
                                    }),
                      "]");
}
// Extracts the ordered device ids from `device_list`.
// NOTE(review): the parameter is taken by value, which copies the DeviceList
// (cheap for inline lists, a shared_ptr bump otherwise); a const& would avoid
// it but changes the declared signature — confirm against the header.
std::vector<DeviceId> GetDeviceIds(DeviceList device_list) {
  std::vector<DeviceId> ids;
  ids.reserve(device_list.devices().size());
  for (const Device* device : device_list.devices()) {
    ids.push_back(device->Id());
  }
  return ids;
}
}
} | #include "xla/python/ifrt/device.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/synchronization/blocking_counter.h"
#include "xla/python/ifrt/device.pb.h"
#include "xla/python/ifrt/sharding_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace {
// Parameterized fixture; inherits device setup (GetDevices, client) from
// test_util::ShardingTest.
class DeviceListTest : public test_util::ShardingTest {};
// Verifies that a DeviceList survives a ToProto/FromProto round trip when
// device ids are resolved through the same client.
TEST_P(DeviceListTest, ToFromProto) {
  auto device_list = GetDevices({0, 1});
  DeviceListProto proto = device_list.ToProto();
  auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
    return client()->LookupDevice(device_id);
  };
  TF_ASSERT_OK_AND_ASSIGN(auto device_list_copy,
                          DeviceList::FromProto(lookup_device_func, proto));
  EXPECT_EQ(device_list_copy, device_list);
}
// Verifies that DeviceList::hash() is race-free under concurrent first-time
// calls: the lazily-computed cached hash must be identical from every thread
// and must not be the kUnsetHash sentinel (0).
TEST_P(DeviceListTest, IdenticalHashFromConcurrentCalls) {
  auto device_list = GetDevices({0, 1});
  const int num_threads = 16;
  absl::BlockingCounter counter(num_threads);
  tsl::thread::ThreadPool thread_pool(
      tsl::Env::Default(), tsl::ThreadOptions(), "test_pool",
      std::min(num_threads, tsl::port::MaxParallelism()));
  std::vector<uint64_t> hashes(num_threads);
  for (int i = 0; i < num_threads; ++i) {
    thread_pool.Schedule([&, i]() {
      // Each slot is written by exactly one thread; no synchronization needed
      // beyond the blocking counter.
      hashes[i] = device_list.hash();
      counter.DecrementCount();
    });
  }
  counter.Wait();
  for (int i = 0; i < num_threads; ++i) {
    EXPECT_EQ(hashes[i], device_list.hash());
  }
  EXPECT_NE(device_list.hash(), 0);
}
// Verifies DeviceList equality semantics: equal for identical device
// sequences (including copies and moves), unequal for different lengths or
// different orderings of the same devices.
TEST_P(DeviceListTest, EqualityTest) {
  auto device_list1 = GetDevices({0, 1});
  auto device_list2 = GetDevices({0, 1});
  EXPECT_EQ(device_list1, device_list2);
  auto device_list3 = device_list1;
  EXPECT_EQ(device_list1, device_list3);
  auto device_list4 = std::move(device_list2);
  EXPECT_EQ(device_list1, device_list4);
  auto device_list5 = GetDevices({0});
  EXPECT_NE(device_list1, device_list5);
  auto device_list6 = GetDevices({1, 0});
  EXPECT_NE(device_list1, device_list6);
}
// Instantiates the suite with ShardingTestParam{2, 2} — presumably
// {num_devices, num_addressable_devices}; confirm against ShardingTestParam.
INSTANTIATE_TEST_SUITE_P(NumDevices, DeviceListTest,
                         testing::Values(test_util::ShardingTestParam{
                             2,
                             2}));
}
}
} | absl::StatusOr<DeviceList> DeviceList::FromProto(LookupDeviceFunc lookup_device,
const DeviceListProto& proto) {
DeviceList::Devices devices;
devices.reserve(proto.device_ids_size());
for (int device_id : proto.device_ids()) {
TF_ASSIGN_OR_RETURN(Device * device, lookup_device(DeviceId(device_id)));
devices.push_back(device);
}
return DeviceList(std::move(devices));
} | TEST_P(DeviceListTest, ToFromProto) {
auto device_list = GetDevices({0, 1});
DeviceListProto proto = device_list.ToProto();
auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
return client()->LookupDevice(device_id);
};
TF_ASSERT_OK_AND_ASSIGN(auto device_list_copy,
DeviceList::FromProto(lookup_device_func, proto));
EXPECT_EQ(device_list_copy, device_list);
} |
#include "quiche/spdy/core/hpack/hpack_header_table.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/spdy/core/hpack/hpack_constants.h"
#include "quiche/spdy/core/hpack/hpack_entry.h"
#include "quiche/spdy/core/hpack/hpack_static_table.h"
namespace spdy {
// Constructs a table backed by the process-wide HPACK static table (entries
// plus its by-name and by-name-and-value indices). The dynamic table starts
// empty with the default size bound from the HPACK spec
// (kDefaultHeaderTableSizeSetting).
HpackHeaderTable::HpackHeaderTable()
    : static_entries_(ObtainHpackStaticTable().GetStaticEntries()),
      static_index_(ObtainHpackStaticTable().GetStaticIndex()),
      static_name_index_(ObtainHpackStaticTable().GetStaticNameIndex()),
      settings_size_bound_(kDefaultHeaderTableSizeSetting),
      size_(0),
      max_size_(kDefaultHeaderTableSizeSetting),
      dynamic_table_insertions_(0) {}
// Defaulted: all members clean up via RAII.
HpackHeaderTable::~HpackHeaderTable() = default;
// Returns the 1-based HPACK index of the most recent entry with this name,
// preferring the static table, or kHpackEntryNotFound if absent. Dynamic
// indices follow the static table: the index maps store insertion counts, so
// (insertions - stored count + kStaticTableSize) yields the HPACK index.
size_t HpackHeaderTable::GetByName(absl::string_view name) {
  auto static_it = static_name_index_.find(name);
  if (static_it != static_name_index_.end()) {
    // Static indices are 0-based internally; HPACK indices start at 1.
    return 1 + static_it->second;
  }
  auto dynamic_it = dynamic_name_index_.find(name);
  if (dynamic_it != dynamic_name_index_.end()) {
    return dynamic_table_insertions_ - dynamic_it->second + kStaticTableSize;
  }
  return kHpackEntryNotFound;
}
// Returns the 1-based HPACK index of the most recent entry matching both name
// and value, preferring the static table, or kHpackEntryNotFound. Same index
// arithmetic as GetByName for dynamic entries.
size_t HpackHeaderTable::GetByNameAndValue(absl::string_view name,
                                           absl::string_view value) {
  const HpackLookupEntry query{name, value};
  auto static_it = static_index_.find(query);
  if (static_it != static_index_.end()) {
    return 1 + static_it->second;
  }
  auto dynamic_it = dynamic_index_.find(query);
  if (dynamic_it != dynamic_index_.end()) {
    return dynamic_table_insertions_ - dynamic_it->second + kStaticTableSize;
  }
  return kHpackEntryNotFound;
}
// Sets the dynamic table's maximum size, evicting oldest entries until the
// current size fits. `max_size` must not exceed the SETTINGS-negotiated bound.
void HpackHeaderTable::SetMaxSize(size_t max_size) {
  QUICHE_CHECK_LE(max_size, settings_size_bound_);
  max_size_ = max_size;
  if (size_ <= max_size_) {
    return;
  }
  Evict(EvictionCountToReclaim(size_ - max_size_));
  QUICHE_CHECK_LE(size_, max_size_);
}
// Applies a new SETTINGS_HEADER_TABLE_SIZE: raises the bound and immediately
// adopts it as the effective maximum (evicting via SetMaxSize if shrinking).
void HpackHeaderTable::SetSettingsHeaderTableSize(size_t settings_size) {
  settings_size_bound_ = settings_size;
  SetMaxSize(settings_size_bound_);
}
// Returns, via out-iterators, the suffix of dynamic_entries_ (the oldest
// entries, stored at the back) that would be evicted to make room for a new
// (name, value) entry. The range is empty when the entry already fits.
void HpackHeaderTable::EvictionSet(absl::string_view name,
                                   absl::string_view value,
                                   DynamicEntryTable::iterator* begin_out,
                                   DynamicEntryTable::iterator* end_out) {
  const size_t evictions_needed = EvictionCountForEntry(name, value);
  *end_out = dynamic_entries_.end();
  *begin_out = dynamic_entries_.end() - evictions_needed;
}
// Returns how many oldest entries must be evicted before an entry of
// HpackEntry::Size(name, value) bytes fits within max_size_; zero when the
// current headroom already suffices.
size_t HpackHeaderTable::EvictionCountForEntry(absl::string_view name,
                                               absl::string_view value) const {
  const size_t entry_size = HpackEntry::Size(name, value);
  const size_t headroom = max_size_ - size_;
  if (entry_size <= headroom) {
    return 0;
  }
  return EvictionCountToReclaim(entry_size - headroom);
}
// Returns how many entries, evicted oldest-first (from the back of
// dynamic_entries_), are needed to free at least `reclaim_size` bytes. May
// count the whole table if the total is smaller than the request.
size_t HpackHeaderTable::EvictionCountToReclaim(size_t reclaim_size) const {
  size_t count = 0;
  auto it = dynamic_entries_.rbegin();
  while (reclaim_size != 0 && it != dynamic_entries_.rend()) {
    reclaim_size -= std::min(reclaim_size, (*it)->Size());
    ++it;
    ++count;
  }
  return count;
}
// Evicts `count` entries, oldest first. TryAddEntry pushes new entries to the
// front of dynamic_entries_, so the oldest entry lives at the back.
void HpackHeaderTable::Evict(size_t count) {
  for (size_t i = 0; i != count; ++i) {
    QUICHE_CHECK(!dynamic_entries_.empty());
    HpackEntry* entry = dynamic_entries_.back().get();
    // Insertion-order number of the entry being evicted: the index maps store
    // insertion counts, and the oldest entry's count is total insertions
    // minus the current table length.
    const size_t index = dynamic_table_insertions_ - dynamic_entries_.size();
    size_ -= entry->Size();
    auto it = dynamic_index_.find({entry->name(), entry->value()});
    QUICHE_DCHECK(it != dynamic_index_.end());
    // Only erase if the map still points at this entry; a newer duplicate
    // insertion may have re-pointed it (see TryAddEntry).
    if (it->second == index) {
      dynamic_index_.erase(it);
    }
    auto name_it = dynamic_name_index_.find(entry->name());
    QUICHE_DCHECK(name_it != dynamic_name_index_.end());
    if (name_it->second == index) {
      dynamic_name_index_.erase(name_it);
    }
    dynamic_entries_.pop_back();
  }
}
// Inserts a (name, value) entry into the dynamic table, evicting older
// entries as needed. Returns the new entry, or nullptr (with nothing
// inserted) when the entry exceeds the table's maximum size — in which case
// the eviction above has already emptied the table, per HPACK semantics.
const HpackEntry* HpackHeaderTable::TryAddEntry(absl::string_view name,
                                                absl::string_view value) {
  Evict(EvictionCountForEntry(name, value));
  size_t entry_size = HpackEntry::Size(name, value);
  if (entry_size > (max_size_ - size_)) {
    // Entry is too large even for an empty table; the eviction pass must
    // have drained everything.
    QUICHE_DCHECK(dynamic_entries_.empty());
    QUICHE_DCHECK_EQ(0u, size_);
    return nullptr;
  }
  // The newest entry goes to the front; `index` is its insertion-order
  // number, which GetByName*/Evict convert to HPACK indices.
  const size_t index = dynamic_table_insertions_;
  dynamic_entries_.push_front(
      std::make_unique<HpackEntry>(std::string(name), std::string(value)));
  HpackEntry* new_entry = dynamic_entries_.front().get();
  auto index_result = dynamic_index_.insert(std::make_pair(
      HpackLookupEntry{new_entry->name(), new_entry->value()}, index));
  if (!index_result.second) {
    // A duplicate (name, value) already exists; re-point the map at the
    // newer entry so lookups return the lowest HPACK index.
    QUICHE_DVLOG(1) << "Found existing entry at: " << index_result.first->second
                    << " replacing with: " << new_entry->GetDebugString()
                    << " at: " << index;
    QUICHE_DCHECK_GT(index, index_result.first->second);
    dynamic_index_.erase(index_result.first);
    auto insert_result = dynamic_index_.insert(std::make_pair(
        HpackLookupEntry{new_entry->name(), new_entry->value()}, index));
    QUICHE_CHECK(insert_result.second);
  }
  auto name_result =
      dynamic_name_index_.insert(std::make_pair(new_entry->name(), index));
  if (!name_result.second) {
    // Same replacement policy for the name-only index.
    QUICHE_DVLOG(1) << "Found existing entry at: " << name_result.first->second
                    << " replacing with: " << new_entry->GetDebugString()
                    << " at: " << index;
    QUICHE_DCHECK_GT(index, name_result.first->second);
    dynamic_name_index_.erase(name_result.first);
    auto insert_result =
        dynamic_name_index_.insert(std::make_pair(new_entry->name(), index));
    QUICHE_CHECK(insert_result.second);
  }
  size_ += entry_size;
  ++dynamic_table_insertions_;
  return dynamic_entries_.front().get();
}
} | #include "quiche/spdy/core/hpack/hpack_header_table.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/spdy/core/hpack/hpack_constants.h"
#include "quiche/spdy/core/hpack/hpack_entry.h"
#include "quiche/spdy/core/hpack/hpack_static_table.h"
namespace spdy {
using std::distance;
namespace test {
// Test peer exposing HpackHeaderTable's private members and methods
// (friend-based white-box access) to the tests below.
class HpackHeaderTablePeer {
 public:
  explicit HpackHeaderTablePeer(HpackHeaderTable* table) : table_(table) {}
  const HpackHeaderTable::DynamicEntryTable& dynamic_entries() {
    return table_->dynamic_entries_;
  }
  const HpackHeaderTable::StaticEntryTable& static_entries() {
    return table_->static_entries_;
  }
  const HpackEntry* GetFirstStaticEntry() {
    return &table_->static_entries_.front();
  }
  const HpackEntry* GetLastStaticEntry() {
    return &table_->static_entries_.back();
  }
  // Materializes the would-be-evicted iterator range as a vector of raw
  // pointers for easy size/content assertions.
  std::vector<HpackEntry*> EvictionSet(absl::string_view name,
                                       absl::string_view value) {
    HpackHeaderTable::DynamicEntryTable::iterator begin, end;
    table_->EvictionSet(name, value, &begin, &end);
    std::vector<HpackEntry*> result;
    for (; begin != end; ++begin) {
      result.push_back(begin->get());
    }
    return result;
  }
  size_t dynamic_table_insertions() {
    return table_->dynamic_table_insertions_;
  }
  size_t EvictionCountForEntry(absl::string_view name,
                               absl::string_view value) {
    return table_->EvictionCountForEntry(name, value);
  }
  size_t EvictionCountToReclaim(size_t reclaim_size) {
    return table_->EvictionCountToReclaim(reclaim_size);
  }
  void Evict(size_t count) { return table_->Evict(count); }
 private:
  HpackHeaderTable* table_;
};
}
namespace {
// Fixture providing a fresh table, its test peer, and helpers for building
// entries of exact byte sizes (per HPACK: name + value + 32-byte overhead).
class HpackHeaderTableTest : public quiche::test::QuicheTest {
 protected:
  typedef std::vector<HpackEntry> HpackEntryVector;
  HpackHeaderTableTest() : table_(), peer_(&table_) {}
  // Builds an entry whose HpackEntry::Size() is exactly `size`, splitting the
  // non-overhead bytes between a name of 'n's and a value of 'v's.
  static HpackEntry MakeEntryOfSize(uint32_t size) {
    EXPECT_GE(size, kHpackEntrySizeOverhead);
    std::string name((size - kHpackEntrySizeOverhead) / 2, 'n');
    std::string value(size - kHpackEntrySizeOverhead - name.size(), 'v');
    HpackEntry entry(name, value);
    EXPECT_EQ(size, entry.Size());
    return entry;
  }
  // Builds entries of growing size whose sizes sum exactly to `total_size`.
  static HpackEntryVector MakeEntriesOfTotalSize(uint32_t total_size) {
    EXPECT_GE(total_size, kHpackEntrySizeOverhead);
    uint32_t entry_size = kHpackEntrySizeOverhead;
    uint32_t remaining_size = total_size;
    HpackEntryVector entries;
    while (remaining_size > 0) {
      EXPECT_LE(entry_size, remaining_size);
      entries.push_back(MakeEntryOfSize(entry_size));
      remaining_size -= entry_size;
      entry_size = std::min(remaining_size, entry_size + 32);
    }
    return entries;
  }
  // Adds each entry to the table, asserting beforehand that no eviction would
  // be required and afterwards that the insertion succeeded.
  void AddEntriesExpectNoEviction(const HpackEntryVector& entries) {
    for (auto it = entries.begin(); it != entries.end(); ++it) {
      HpackHeaderTable::DynamicEntryTable::iterator begin, end;
      table_.EvictionSet(it->name(), it->value(), &begin, &end);
      EXPECT_EQ(0, distance(begin, end));
      const HpackEntry* entry = table_.TryAddEntry(it->name(), it->value());
      EXPECT_NE(entry, static_cast<HpackEntry*>(nullptr));
    }
  }
  HpackHeaderTable table_;
  test::HpackHeaderTablePeer peer_;
};
// Verifies a fresh table: empty dynamic part, default size bounds, full
// static table, and that every static entry resolves to its 1-based index.
TEST_F(HpackHeaderTableTest, StaticTableInitialization) {
  EXPECT_EQ(0u, table_.size());
  EXPECT_EQ(kDefaultHeaderTableSizeSetting, table_.max_size());
  EXPECT_EQ(kDefaultHeaderTableSizeSetting, table_.settings_size_bound());
  EXPECT_EQ(0u, peer_.dynamic_entries().size());
  EXPECT_EQ(0u, peer_.dynamic_table_insertions());
  const HpackHeaderTable::StaticEntryTable& static_entries =
      peer_.static_entries();
  EXPECT_EQ(kStaticTableSize, static_entries.size());
  size_t index = 1;
  for (const HpackEntry& entry : static_entries) {
    EXPECT_EQ(index, table_.GetByNameAndValue(entry.name(), entry.value()));
    index++;
  }
}
// Verifies one insert (lands at HPACK index 62, just past the static table)
// followed by one eviction, and that the static table is untouched by both.
TEST_F(HpackHeaderTableTest, BasicDynamicEntryInsertionAndEviction) {
  EXPECT_EQ(kStaticTableSize, peer_.static_entries().size());
  const HpackEntry* first_static_entry = peer_.GetFirstStaticEntry();
  const HpackEntry* last_static_entry = peer_.GetLastStaticEntry();
  const HpackEntry* entry = table_.TryAddEntry("header-key", "Header Value");
  EXPECT_EQ("header-key", entry->name());
  EXPECT_EQ("Header Value", entry->value());
  EXPECT_EQ(entry->Size(), table_.size());
  EXPECT_EQ(1u, peer_.dynamic_entries().size());
  EXPECT_EQ(kStaticTableSize, peer_.static_entries().size());
  EXPECT_EQ(62u, table_.GetByNameAndValue("header-key", "Header Value"));
  EXPECT_EQ(first_static_entry, peer_.GetFirstStaticEntry());
  EXPECT_EQ(last_static_entry, peer_.GetLastStaticEntry());
  peer_.Evict(1);
  EXPECT_EQ(0u, table_.size());
  EXPECT_EQ(0u, peer_.dynamic_entries().size());
  EXPECT_EQ(kStaticTableSize, peer_.static_entries().size());
  EXPECT_EQ(first_static_entry, peer_.GetFirstStaticEntry());
  EXPECT_EQ(last_static_entry, peer_.GetLastStaticEntry());
}
// Verifies index arithmetic with duplicate names/values: static entries win,
// duplicates resolve to the newest (lowest-index) dynamic entry, and indices
// stay stable as older entries are evicted.
TEST_F(HpackHeaderTableTest, EntryIndexing) {
  const HpackEntry* first_static_entry = peer_.GetFirstStaticEntry();
  const HpackEntry* last_static_entry = peer_.GetLastStaticEntry();
  EXPECT_EQ(1u, table_.GetByName(first_static_entry->name()));
  EXPECT_EQ(1u, table_.GetByNameAndValue(first_static_entry->name(),
                                         first_static_entry->value()));
  // Seven insertions, including duplicate names and one exact duplicate
  // ("key-2", "Value Three"), to exercise index replacement.
  table_.TryAddEntry(first_static_entry->name(), first_static_entry->value());
  table_.TryAddEntry(first_static_entry->name(), "Value Four");
  table_.TryAddEntry("key-1", "Value One");
  table_.TryAddEntry("key-2", "Value Three");
  table_.TryAddEntry("key-1", "Value Two");
  table_.TryAddEntry("key-2", "Value Three");
  table_.TryAddEntry("key-2", "Value Four");
  EXPECT_EQ(1u, table_.GetByNameAndValue(first_static_entry->name(),
                                         first_static_entry->value()));
  EXPECT_EQ(67u,
            table_.GetByNameAndValue(first_static_entry->name(), "Value Four"));
  EXPECT_EQ(66u, table_.GetByNameAndValue("key-1", "Value One"));
  EXPECT_EQ(64u, table_.GetByNameAndValue("key-1", "Value Two"));
  EXPECT_EQ(63u, table_.GetByNameAndValue("key-2", "Value Three"));
  EXPECT_EQ(62u, table_.GetByNameAndValue("key-2", "Value Four"));
  EXPECT_EQ(first_static_entry, peer_.GetFirstStaticEntry());
  EXPECT_EQ(last_static_entry, peer_.GetLastStaticEntry());
  // Name-only lookups return the newest entry for that name.
  EXPECT_EQ(64u, table_.GetByName("key-1"));
  EXPECT_EQ(62u, table_.GetByName("key-2"));
  EXPECT_EQ(1u, table_.GetByName(first_static_entry->name()));
  EXPECT_EQ(kHpackEntryNotFound, table_.GetByName("not-present"));
  EXPECT_EQ(66u, table_.GetByNameAndValue("key-1", "Value One"));
  EXPECT_EQ(64u, table_.GetByNameAndValue("key-1", "Value Two"));
  EXPECT_EQ(63u, table_.GetByNameAndValue("key-2", "Value Three"));
  EXPECT_EQ(62u, table_.GetByNameAndValue("key-2", "Value Four"));
  EXPECT_EQ(1u, table_.GetByNameAndValue(first_static_entry->name(),
                                         first_static_entry->value()));
  EXPECT_EQ(67u,
            table_.GetByNameAndValue(first_static_entry->name(), "Value Four"));
  EXPECT_EQ(kHpackEntryNotFound,
            table_.GetByNameAndValue("key-1", "Not Present"));
  EXPECT_EQ(kHpackEntryNotFound,
            table_.GetByNameAndValue("not-present", "Value One"));
  // Evicting the oldest dynamic entry (duplicate of a static one) must not
  // affect lookups that the static table or newer entries satisfy.
  peer_.Evict(1);
  EXPECT_EQ(1u, table_.GetByNameAndValue(first_static_entry->name(),
                                         first_static_entry->value()));
  EXPECT_EQ(67u,
            table_.GetByNameAndValue(first_static_entry->name(), "Value Four"));
  peer_.Evict(1);
  EXPECT_EQ(kHpackEntryNotFound,
            table_.GetByNameAndValue(first_static_entry->name(), "Value Four"));
  EXPECT_EQ(first_static_entry, peer_.GetFirstStaticEntry());
  EXPECT_EQ(last_static_entry, peer_.GetLastStaticEntry());
}
// Verifies SetMaxSize/SetSettingsHeaderTableSize eviction behavior at exact
// size boundaries (fit exactly → no eviction; one byte under → evict one).
TEST_F(HpackHeaderTableTest, SetSizes) {
  std::string key = "key", value = "value";
  const HpackEntry* entry1 = table_.TryAddEntry(key, value);
  const HpackEntry* entry2 = table_.TryAddEntry(key, value);
  const HpackEntry* entry3 = table_.TryAddEntry(key, value);
  size_t max_size = entry1->Size() + entry2->Size() + entry3->Size();
  table_.SetMaxSize(max_size);
  EXPECT_EQ(3u, peer_.dynamic_entries().size());
  max_size = entry1->Size() + entry2->Size() + entry3->Size() - 1;
  table_.SetMaxSize(max_size);
  EXPECT_EQ(2u, peer_.dynamic_entries().size());
  EXPECT_EQ(kDefaultHeaderTableSizeSetting, table_.settings_size_bound());
  table_.SetSettingsHeaderTableSize(kDefaultHeaderTableSizeSetting * 3 + 1);
  EXPECT_EQ(kDefaultHeaderTableSizeSetting * 3 + 1, table_.max_size());
  max_size = entry3->Size() - 1;
  table_.SetSettingsHeaderTableSize(max_size);
  EXPECT_EQ(max_size, table_.max_size());
  EXPECT_EQ(max_size, table_.settings_size_bound());
  EXPECT_EQ(0u, peer_.dynamic_entries().size());
}
// Verifies EvictionCountForEntry around exact-fit boundaries: a same-size
// entry fits (0 evictions); one extra byte tips into evicting one more.
TEST_F(HpackHeaderTableTest, EvictionCountForEntry) {
  std::string key = "key", value = "value";
  const HpackEntry* entry1 = table_.TryAddEntry(key, value);
  const HpackEntry* entry2 = table_.TryAddEntry(key, value);
  size_t entry3_size = HpackEntry::Size(key, value);
  table_.SetMaxSize(entry1->Size() + entry2->Size() + entry3_size);
  EXPECT_EQ(0u, peer_.EvictionCountForEntry(key, value));
  EXPECT_EQ(1u, peer_.EvictionCountForEntry(key, value + "x"));
  table_.SetMaxSize(entry1->Size() + entry2->Size());
  EXPECT_EQ(1u, peer_.EvictionCountForEntry(key, value));
  EXPECT_EQ(2u, peer_.EvictionCountForEntry(key, value + "x"));
}
// Verifies EvictionCountToReclaim at byte boundaries: reclaiming up to one
// entry's size costs one eviction; one byte more costs two.
TEST_F(HpackHeaderTableTest, EvictionCountToReclaim) {
  std::string key = "key", value = "value";
  const HpackEntry* entry1 = table_.TryAddEntry(key, value);
  const HpackEntry* entry2 = table_.TryAddEntry(key, value);
  EXPECT_EQ(1u, peer_.EvictionCountToReclaim(1));
  EXPECT_EQ(1u, peer_.EvictionCountToReclaim(entry1->Size()));
  EXPECT_EQ(2u, peer_.EvictionCountToReclaim(entry1->Size() + 1));
  EXPECT_EQ(2u, peer_.EvictionCountToReclaim(entry1->Size() + entry2->Size()));
}
// Verifies the table can be filled exactly to max_size() without triggering
// any eviction.
TEST_F(HpackHeaderTableTest, TryAddEntryBasic) {
  EXPECT_EQ(0u, table_.size());
  EXPECT_EQ(table_.settings_size_bound(), table_.max_size());
  HpackEntryVector entries = MakeEntriesOfTotalSize(table_.max_size());
  AddEntriesExpectNoEviction(entries);
  EXPECT_EQ(table_.max_size(), table_.size());
  EXPECT_EQ(table_.settings_size_bound(), table_.size());
}
// Verifies that repeatedly shrinking max_size by one byte below the current
// size evicts exactly one entry each time, until the table is empty.
TEST_F(HpackHeaderTableTest, SetMaxSize) {
  HpackEntryVector entries =
      MakeEntriesOfTotalSize(kDefaultHeaderTableSizeSetting / 2);
  AddEntriesExpectNoEviction(entries);
  for (auto it = entries.begin(); it != entries.end(); ++it) {
    size_t expected_count = distance(it, entries.end());
    EXPECT_EQ(expected_count, peer_.dynamic_entries().size());
    table_.SetMaxSize(table_.size() + 1);
    EXPECT_EQ(expected_count, peer_.dynamic_entries().size());
    table_.SetMaxSize(table_.size());
    EXPECT_EQ(expected_count, peer_.dynamic_entries().size());
    --expected_count;
    table_.SetMaxSize(table_.size() - 1);
    EXPECT_EQ(expected_count, peer_.dynamic_entries().size());
  }
  EXPECT_EQ(0u, table_.size());
}
// Verifies that adding an entry sized to leave room for exactly one existing
// entry evicts all but the newest, and both survivors are indexed correctly.
TEST_F(HpackHeaderTableTest, TryAddEntryEviction) {
  HpackEntryVector entries = MakeEntriesOfTotalSize(table_.max_size());
  AddEntriesExpectNoEviction(entries);
  const HpackEntry* survivor_entry = peer_.dynamic_entries().front().get();
  HpackEntry long_entry =
      MakeEntryOfSize(table_.max_size() - survivor_entry->Size());
  EXPECT_EQ(peer_.dynamic_entries().size() - 1,
            peer_.EvictionSet(long_entry.name(), long_entry.value()).size());
  table_.TryAddEntry(long_entry.name(), long_entry.value());
  EXPECT_EQ(2u, peer_.dynamic_entries().size());
  EXPECT_EQ(63u, table_.GetByNameAndValue(survivor_entry->name(),
                                          survivor_entry->value()));
  EXPECT_EQ(62u,
            table_.GetByNameAndValue(long_entry.name(), long_entry.value()));
}
// Verifies that an entry larger than max_size() is rejected (nullptr) but
// still empties the table, per HPACK's oversized-entry semantics.
TEST_F(HpackHeaderTableTest, TryAddTooLargeEntry) {
  HpackEntryVector entries = MakeEntriesOfTotalSize(table_.max_size());
  AddEntriesExpectNoEviction(entries);
  const HpackEntry long_entry = MakeEntryOfSize(table_.max_size() + 1);
  EXPECT_EQ(peer_.dynamic_entries().size(),
            peer_.EvictionSet(long_entry.name(), long_entry.value()).size());
  const HpackEntry* new_entry =
      table_.TryAddEntry(long_entry.name(), long_entry.value());
  EXPECT_EQ(new_entry, static_cast<HpackEntry*>(nullptr));
  EXPECT_EQ(0u, peer_.dynamic_entries().size());
}
}
} | size_t HpackHeaderTable::EvictionCountForEntry(absl::string_view name,
absl::string_view value) const {
size_t available_size = max_size_ - size_;
size_t entry_size = HpackEntry::Size(name, value);
if (entry_size <= available_size) {
return 0;
}
return EvictionCountToReclaim(entry_size - available_size);
} | TEST_F(HpackHeaderTableTest, EvictionCountForEntry) {
std::string key = "key", value = "value";
const HpackEntry* entry1 = table_.TryAddEntry(key, value);
const HpackEntry* entry2 = table_.TryAddEntry(key, value);
size_t entry3_size = HpackEntry::Size(key, value);
table_.SetMaxSize(entry1->Size() + entry2->Size() + entry3_size);
EXPECT_EQ(0u, peer_.EvictionCountForEntry(key, value));
EXPECT_EQ(1u, peer_.EvictionCountForEntry(key, value + "x"));
table_.SetMaxSize(entry1->Size() + entry2->Size());
EXPECT_EQ(1u, peer_.EvictionCountForEntry(key, value));
EXPECT_EQ(2u, peer_.EvictionCountForEntry(key, value + "x"));
} |