Skip to content

Commit

Permalink
Support specifying array layout order as an arbitrary permutation
Browse files Browse the repository at this point in the history
Previously, while StridedLayout and Array internally supported arbitrary
strides, there was no convenient interface for specifying any order
other than C or F.

With this change, an arbitrary permutation can be specified in place of
a `ContiguousLayoutOrder` (C/F) value in all StridedLayout and Array
APIs.

PiperOrigin-RevId: 695974377
Change-Id: Ib04958a881b36eed33bf173cde0aaae39cbfa975
  • Loading branch information
jbms authored and copybara-github committed Nov 13, 2024
1 parent 91984a3 commit d779cd5
Show file tree
Hide file tree
Showing 14 changed files with 589 additions and 360 deletions.
4 changes: 4 additions & 0 deletions tensorstore/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,7 @@ tensorstore_cc_library(
hdrs = ["chunk_layout.h"],
deps = [
":box",
":contiguous_layout",
":index",
":index_interval",
":json_serialization_options",
Expand Down Expand Up @@ -389,7 +390,10 @@ tensorstore_cc_library(
hdrs = ["contiguous_layout.h"],
deps = [
":index",
":rank",
"//tensorstore/util:dimension_set",
"//tensorstore/util:span",
"@com_google_absl//absl/log:absl_check",
],
)

Expand Down
111 changes: 67 additions & 44 deletions tensorstore/array.h
Original file line number Diff line number Diff line change
Expand Up @@ -570,25 +570,32 @@ class Array {
/// The caller is responsible for ensuring that `shape` and `order` are
/// valid for `element_pointer`. This function does not check them in any
/// way.
template <typename SourcePointer = ElementPointer, typename Shape,
std::enable_if_t<
(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container &&
IsImplicitlyCompatibleFullIndexVector<static_rank, Shape>)>* =
nullptr>
template <
typename SourcePointer = ElementPointer, typename Shape,
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<
(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container &&
IsImplicitlyCompatibleFullIndexVector<static_rank, Shape> &&
IsContiguousLayoutOrder<
LayoutOrder, RankConstraint::FromInlineRank(Rank)>)>* = nullptr>
Array(SourcePointer element_pointer, const Shape& shape,
ContiguousLayoutOrder order = c_order) {
LayoutOrder order = c_order) {
this->element_pointer() = std::move(element_pointer);
InitializeContiguousLayout(order, this->dtype().size(),
tensorstore::span(shape), &this->layout());
}
template <typename SourcePointer = ElementPointer, DimensionIndex ShapeRank,
std::enable_if_t<
(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container &&
RankConstraint::Implies(ShapeRank, static_rank))>* = nullptr>
template <
typename SourcePointer = ElementPointer, DimensionIndex ShapeRank,
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<
(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container &&
RankConstraint::Implies(ShapeRank, static_rank) &&
IsContiguousLayoutOrder<
LayoutOrder, RankConstraint::FromInlineRank(Rank)>)>* = nullptr>
Array(SourcePointer element_pointer, const Index (&shape)[ShapeRank],
ContiguousLayoutOrder order = c_order) {
LayoutOrder order = c_order) {
this->element_pointer() = std::move(element_pointer);
InitializeContiguousLayout(order, this->dtype().size(),
tensorstore::span(shape), &this->layout());
Expand Down Expand Up @@ -620,11 +627,14 @@ class Array {
/// \id element_pointer, domain, order
template <
typename SourcePointer = ElementPointer,
std::enable_if_t<(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container &&
OriginKind == offset_origin)>* = nullptr>
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<
(std::is_convertible_v<SourcePointer, ElementPointer> &&
LayoutContainerKind == container && OriginKind == offset_origin &&
IsContiguousLayoutOrder<
LayoutOrder, RankConstraint::FromInlineRank(Rank)>)>* = nullptr>
Array(SourcePointer element_pointer, BoxView<static_rank> domain,
ContiguousLayoutOrder order = c_order) {
LayoutOrder order = c_order) {
this->element_pointer() = std::move(element_pointer);
InitializeContiguousLayout(order, this->dtype().size(), domain,
&this->layout());
Expand Down Expand Up @@ -985,20 +995,23 @@ Array(Pointer pointer,
-> Array<DeducedElementTag<Pointer>, Rank, OriginKind, LayoutContainerKind>;

template <typename Pointer, typename Shape,
std::enable_if_t<IsIndexConvertibleVector<Shape>>* = nullptr>
Array(Pointer pointer, const Shape& shape,
ContiguousLayoutOrder order = c_order)
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<(IsIndexConvertibleVector<Shape> &&
IsContiguousLayoutOrder<LayoutOrder>)>* = nullptr>
Array(Pointer pointer, const Shape& shape, LayoutOrder order = c_order)
-> Array<DeducedElementTag<Pointer>, SpanStaticExtent<Shape>::value>;

template <typename Pointer, DimensionIndex Rank>
Array(Pointer pointer, const Index (&shape)[Rank],
ContiguousLayoutOrder order = c_order)
template <typename Pointer, DimensionIndex Rank,
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<IsContiguousLayoutOrder<LayoutOrder>>* = nullptr>
Array(Pointer pointer, const Index (&shape)[Rank], LayoutOrder order = c_order)
-> Array<DeducedElementTag<Pointer>, Rank>;

template <typename Pointer, typename BoxLike,
std::enable_if_t<IsBoxLike<BoxLike>>* = nullptr>
Array(Pointer pointer, const BoxLike& domain,
ContiguousLayoutOrder order = c_order)
typename LayoutOrder = ContiguousLayoutOrder,
std::enable_if_t<(IsBoxLike<BoxLike> &&
IsContiguousLayoutOrder<LayoutOrder>)>* = nullptr>
Array(Pointer pointer, const BoxLike& domain, LayoutOrder order = c_order)
-> Array<DeducedElementTag<Pointer>, BoxLike::static_rank, offset_origin>;

// Specialization of `StaticCastTraits` for `Array`, which enables
Expand Down Expand Up @@ -1516,12 +1529,15 @@ void InitializeArray(const ArrayView<void, dynamic_rank, offset_origin>& array);
/// specified if `Element` is `void`.
/// \relates Array
/// \membergroup Creation functions
template <typename Element = void, typename Extents>
SharedArray<Element, internal::ConstSpanType<Extents>::extent> AllocateArray(
const Extents& extents,
ContiguousLayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> dtype = dtype_v<Element>) {
template <typename Element = void, typename Extents,
typename LayoutOrder = ContiguousLayoutOrder>
std::enable_if_t<IsContiguousLayoutOrder<
LayoutOrder, internal::ConstSpanType<Extents>::extent>,
SharedArray<Element, internal::ConstSpanType<Extents>::extent>>
AllocateArray(const Extents& extents,
LayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> dtype = dtype_v<Element>) {
static_assert(internal::IsIndexPack<
typename internal::ConstSpanType<Extents>::value_type>,
"Extent type must be convertible without narrowing to Index.");
Expand All @@ -1530,12 +1546,14 @@ SharedArray<Element, internal::ConstSpanType<Extents>::extent> AllocateArray(
layout.num_elements(), initialization, dtype),
std::move(layout)};
}
template <typename Element = void, typename BoxType>
std::enable_if_t<IsBoxLike<BoxType>,
template <typename Element = void, typename BoxType,
typename LayoutOrder = ContiguousLayoutOrder>
std::enable_if_t<(IsBoxLike<BoxType> &&
IsContiguousLayoutOrder<LayoutOrder, BoxType::static_rank>),
SharedArray<Element, BoxType::static_rank,
offset_origin>> // NONITPICK: BoxType::static_rank
AllocateArray(const BoxType& domain,
ContiguousLayoutOrder layout_order = ContiguousLayoutOrder::c,
LayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> dtype = dtype_v<Element>) {
StridedLayout<BoxType::static_rank, offset_origin> layout(
Expand All @@ -1550,12 +1568,14 @@ AllocateArray(const BoxType& domain,

// Same as more general overload defined above, but can be called using a braced
// list to specify the extents.
template <typename Element = void, DimensionIndex Rank>
SharedArray<Element, Rank> AllocateArray(
const Index (&extents)[Rank],
ContiguousLayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> representation = dtype_v<Element>) {
template <typename Element = void, DimensionIndex Rank,
typename LayoutOrder = ContiguousLayoutOrder>
std::enable_if_t<IsContiguousLayoutOrder<LayoutOrder, Rank>,
SharedArray<Element, Rank>>
AllocateArray(const Index (&extents)[Rank],
LayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> representation = dtype_v<Element>) {
return AllocateArray<Element, tensorstore::span<const Index, Rank>>(
extents, layout_order, initialization, representation);
}
Expand Down Expand Up @@ -2058,10 +2078,13 @@ UnbroadcastArrayPreserveRank(
/// \relates Array
/// \id array
template <typename ElementTag, DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind LayoutCKind>
bool IsContiguousLayout(
ContainerKind LayoutCKind, typename LayoutOrder>
std::enable_if_t<
IsContiguousLayoutOrder<LayoutOrder, RankConstraint::FromInlineRank(Rank)>,
bool>
IsContiguousLayout(
const Array<ElementTag, Rank, OriginKind, LayoutCKind>& array,
ContiguousLayoutOrder order) {
LayoutOrder order) {
return tensorstore::IsContiguousLayout(array.layout(), order,
array.dtype().size());
}
Expand Down
28 changes: 28 additions & 0 deletions tensorstore/array_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1553,6 +1553,34 @@ TEST(SharedArrayTest, AllocateArrayFromDomain) {
ToString(array));
}

TEST(SharedArrayTest, AllocateArrayWithLayoutPermutation) {
  // Permutation {2, 0, 1} lists dimensions from slowest-varying to
  // fastest-varying: dimension 2 gets the largest byte stride and
  // dimension 1 the smallest, as checked below.
  Index shape[] = {2, 3, 4};
  DimensionIndex permutation[] = {2, 0, 1};
  auto arr = tensorstore::AllocateArray<int>(
      shape, tensorstore::ContiguousLayoutPermutation(permutation),
      tensorstore::value_init);
  EXPECT_THAT(arr.shape(), ::testing::ElementsAre(2, 3, 4));
  EXPECT_THAT(arr.byte_strides(),
              ::testing::ElementsAre(3 * sizeof(int), sizeof(int),
                                     2 * 3 * sizeof(int)));
  // Writes through the permuted layout must land at the logical indices.
  arr(0, 2, 3) = 10;
  arr(1, 1, 0) = 5;
  EXPECT_EQ(
      "{{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 10}}, "
      "{{0, 0, 0, 0}, {5, 0, 0, 0}, {0, 0, 0, 0}}}",
      ToString(arr));
}

TEST(SharedArrayDeathTest, AllocateArrayWithInvalidLayoutPermutation) {
  // {2, 0, 2} repeats dimension 2 and omits dimension 1, so it is not a
  // valid permutation of rank 3; allocation is expected to CHECK-fail.
  Index shape[] = {2, 3, 4};
  DimensionIndex bad_permutation[] = {2, 0, 2};

  EXPECT_DEATH(
      (tensorstore::AllocateArray<int>(
          shape, tensorstore::ContiguousLayoutPermutation(bad_permutation),
          tensorstore::value_init)),
      "IsValidPermutation");
}

template <ContainerKind SourceLayoutCKind, ContainerKind TargetLayoutCKind>
void TestArrayOriginCastOffsetOriginToZeroOrigin() {
auto source = MakeOffsetArray<int>({2, 3}, {{1, 2, 3}, {4, 5, 6}});
Expand Down
12 changes: 7 additions & 5 deletions tensorstore/chunk_layout.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/integer_range.h"
Expand Down Expand Up @@ -619,7 +620,7 @@ class ChunkLayout {
/// EXPECT_THAT(constraints.inner_order(),
/// ::testing::ElementsAre(1, 0, 2));
/// EXPECT_EQ(true, constraints.inner_order().hard_constraint);
struct InnerOrder : public tensorstore::span<const DimensionIndex> {
struct InnerOrder : public ContiguousLayoutPermutation<> {
/// Constructs an unspecified order.
///
/// \id default
Expand All @@ -630,13 +631,11 @@ class ChunkLayout {
/// \id order
explicit InnerOrder(tensorstore::span<const DimensionIndex> s,
bool hard_constraint = true)
: tensorstore::span<const DimensionIndex>(s),
hard_constraint(hard_constraint) {}
: ContiguousLayoutPermutation<>(s), hard_constraint(hard_constraint) {}
template <size_t N>
explicit InnerOrder(const DimensionIndex (&s)[N],
bool hard_constraint = true)
: tensorstore::span<const DimensionIndex>(s),
hard_constraint(hard_constraint) {}
: ContiguousLayoutPermutation<>(s), hard_constraint(hard_constraint) {}

/// Returns `true` if this specifies an order constraint.
bool valid() const { return !this->empty(); }
Expand Down Expand Up @@ -1034,6 +1033,9 @@ constexpr bool ChunkLayout::IsOption<RankConstraint> = true;
template <>
constexpr bool ChunkLayout::IsOption<ChunkLayout::InnerOrder> = true;

template <DimensionIndex Rank>
constexpr bool ChunkLayout::IsOption<ContiguousLayoutPermutation<Rank>> = true;

template <>
constexpr bool ChunkLayout::IsOption<ChunkLayout::GridOrigin> = true;

Expand Down
90 changes: 90 additions & 0 deletions tensorstore/contiguous_layout.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,68 @@

#include <stddef.h>

#include <algorithm>
#include <cassert>
#include <numeric>
#include <ostream>

#include "absl/log/absl_check.h"
#include "tensorstore/index.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"

namespace tensorstore {

// Fills `permutation` with the canonical permutation for `order`: the
// identity permutation for C (row-major) order, or the reversed identity
// permutation for Fortran (column-major) order.
void SetPermutation(ContiguousLayoutOrder order,
                    span<DimensionIndex> permutation) {
  const DimensionIndex rank = permutation.size();
  if (order == c_order) {
    for (DimensionIndex i = 0; i < rank; ++i) permutation[i] = i;
  } else {
    for (DimensionIndex i = 0; i < rank; ++i) permutation[i] = rank - 1 - i;
  }
}

// Returns `true` if `permutation` is a valid permutation of
// `{0, ..., permutation.size() - 1}`, i.e. contains each value exactly once.
bool IsValidPermutation(span<const DimensionIndex> permutation) {
  const DimensionIndex rank = permutation.size();
  if (rank > kMaxRank) return false;
  // Tracks which dimension indices have already been seen.
  DimensionSet seen_dims;
  for (const DimensionIndex dim : permutation) {
    if (dim < 0 || dim >= rank || seen_dims[dim]) return false;
    seen_dims[dim] = true;
  }
  return true;
}

// Returns `true` if `permutation` is exactly the canonical permutation for
// `order`: the identity for C order, or the reversed identity for F order.
bool PermutationMatchesOrder(span<const DimensionIndex> permutation,
                             ContiguousLayoutOrder order) {
  const DimensionIndex rank = permutation.size();
  if (order == c_order) {
    for (DimensionIndex i = 0; i < rank; ++i) {
      if (permutation[i] != i) return false;
    }
    return true;
  }
  for (DimensionIndex i = 0; i < rank; ++i) {
    if (permutation[i] != rank - i - 1) return false;
  }
  return true;
}

// Computes the inverse of the permutation `perm[0..rank)` into
// `inverse_perm`: if `perm[i] == d`, then `inverse_perm[d] == i`.
// \dchecks `perm` must be a valid permutation.
void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm,
                       DimensionIndex* inverse_perm) {
  assert(IsValidPermutation(span(perm, rank)));
  for (DimensionIndex source = 0; source < rank; ++source) {
    inverse_perm[perm[source]] = source;
  }
}

void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
tensorstore::span<const Index> shape,
tensorstore::span<Index> strides) {
Expand All @@ -42,6 +96,42 @@ void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
}
}

// Computes contiguous strides for `shape` according to `permutation`, which
// lists dimensions from slowest-varying to fastest-varying: the dimension
// `permutation[rank - 1]` receives `element_stride`, and each earlier entry
// receives the running product of the later dimensions' extents.
void ComputeStrides(ContiguousLayoutPermutation<> permutation,
                    ptrdiff_t element_stride,
                    tensorstore::span<const Index> shape,
                    tensorstore::span<Index> strides) {
  const DimensionIndex rank = shape.size();
  ABSL_CHECK(strides.size() == rank);
  ABSL_CHECK(permutation.size() == rank);
  ABSL_CHECK(IsValidPermutation(permutation));
  ptrdiff_t stride = element_stride;
  for (DimensionIndex j = rank - 1; j >= 0; --j) {
    const DimensionIndex dim = permutation[j];
    assert(dim >= 0 && dim < rank);
    strides[dim] = stride;
    stride *= shape[dim];
  }
}

// Sets `permutation` to order dimensions from slowest-varying (largest
// absolute stride) to fastest-varying.  A stable sort is used so that
// dimensions with equal effective strides keep their original relative order.
void SetPermutationFromStrides(span<const Index> strides,
                               span<DimensionIndex> permutation) {
  assert(strides.size() == permutation.size());
  std::iota(permutation.begin(), permutation.end(), DimensionIndex(0));
  // Sort key: the negated absolute value of the stride of dimension `dim`.
  // Negating positive values (rather than taking the absolute value of
  // negative ones) avoids possible overflow.
  const auto sort_key = [&](DimensionIndex dim) -> Index {
    const Index stride = strides[dim];
    return stride > 0 ? -stride : stride;
  };
  // Ascending key order corresponds to decreasing effective byte stride.
  std::stable_sort(permutation.begin(), permutation.end(),
                   [&](DimensionIndex a, DimensionIndex b) {
                     return sort_key(a) < sort_key(b);
                   });
}

// Prints "C" for row-major order and "F" for Fortran (column-major) order.
std::ostream& operator<<(std::ostream& os, ContiguousLayoutOrder order) {
  const char label = (order == ContiguousLayoutOrder::c) ? 'C' : 'F';
  return os << label;
}
Expand Down
Loading

0 comments on commit d779cd5

Please sign in to comment.