Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion extension/apple/ExecuTorch/Exported/ExecuTorchTensor.mm
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ - (instancetype)initWithNativeInstance:(void *)nativeInstance {
- (instancetype)initWithTensor:(ExecuTorchTensor *)otherTensor {
ET_CHECK(otherTensor);
auto tensor = make_tensor_ptr(
**reinterpret_cast<TensorPtr *>(otherTensor.nativeInstance)
*reinterpret_cast<TensorPtr *>(otherTensor.nativeInstance)
);
return [self initWithNativeInstance:&tensor];
}
Expand Down
85 changes: 70 additions & 15 deletions extension/tensor/tensor_ptr.h
Original file line number Diff line number Diff line change
Expand Up @@ -327,29 +327,84 @@ inline TensorPtr make_tensor_ptr(
* Creates a TensorPtr to manage a new Tensor with the same properties
* as the given Tensor, sharing the same data without owning it.
*
* @param tensor The Tensor whose properties are used to create a new TensorPtr.
* @return A new TensorPtr managing a Tensor with the same properties as the
* original.
* If an override is provided (non-empty), it is passed as-is. If an override is
* empty, the corresponding metadata is reused from the source tensor when it
* fits; otherwise it is left empty for the core factory to derive a valid
* configuration. If `dim_order` is empty but `strides` is provided, `dim_order`
* is left empty so the core may infer it from the provided strides.
*
* @param tensor The source tensor to alias.
* @param sizes Optional sizes override.
* @param dim_order Optional dimension order override.
* @param strides Optional strides override.
* @param deleter A custom deleter function for managing the lifetime of the
* original Tensor.
* @return A TensorPtr aliasing the same storage with requested metadata.
*/
inline TensorPtr make_tensor_ptr(const executorch::aten::Tensor& tensor) {
inline TensorPtr make_tensor_ptr(
const executorch::aten::Tensor& tensor,
std::vector<executorch::aten::SizesType> sizes = {},
std::vector<executorch::aten::DimOrderType> dim_order = {},
std::vector<executorch::aten::StridesType> strides = {},
std::function<void(void*)> deleter = nullptr) {
if (sizes.empty()) {
sizes.assign(tensor.sizes().begin(), tensor.sizes().end());
}
const auto same_rank = sizes.size() == static_cast<size_t>(tensor.dim());
const auto same_shape = same_rank &&
std::equal(sizes.begin(), sizes.end(), tensor.sizes().begin());
const auto element_count =
executorch::aten::compute_numel(sizes.data(), sizes.size());
const auto parent_element_count = tensor.numel();
ET_CHECK_MSG(
element_count <= parent_element_count,
"Requested view has %zd elements, but source tensor only has %zd.",
static_cast<ssize_t>(element_count),
static_cast<ssize_t>(parent_element_count));
#ifndef USE_ATEN_LIB
if (dim_order.empty() && strides.empty() && same_rank) {
dim_order.assign(tensor.dim_order().begin(), tensor.dim_order().end());
}
#endif // USE_ATEN_LIB
if (strides.empty() && dim_order.empty() && same_shape) {
strides.assign(tensor.strides().begin(), tensor.strides().end());
}
return make_tensor_ptr(
std::vector<executorch::aten::SizesType>(
tensor.sizes().begin(), tensor.sizes().end()),
tensor.mutable_data_ptr(),
#ifndef USE_ATEN_LIB
std::vector<executorch::aten::DimOrderType>(
tensor.dim_order().begin(), tensor.dim_order().end()),
std::vector<executorch::aten::StridesType>(
tensor.strides().begin(), tensor.strides().end()),
std::move(dim_order),
std::move(strides),
tensor.scalar_type(),
tensor.shape_dynamism()
#ifndef USE_ATEN_LIB
tensor.shape_dynamism(),
#else // USE_ATEN_LIB
{},
std::vector<executorch::aten::StridesType>(
tensor.strides().begin(), tensor.strides().end()),
tensor.scalar_type()
executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
#endif // USE_ATEN_LIB
);
std::move(deleter));
}

/**
 * Convenience overload equivalent to make_tensor_ptr(*tensor_ptr, ...),
 * except that the returned view also retains the source TensorPtr: the
 * aliased storage is guaranteed to outlive the returned TensorPtr.
 *
 * @param tensor_ptr The source tensor pointer to alias.
 * @param sizes Optional sizes override.
 * @param dim_order Optional dimension order override.
 * @param strides Optional strides override.
 * @return A TensorPtr aliasing the same storage with requested metadata.
 */
inline TensorPtr make_tensor_ptr(
    const TensorPtr& tensor_ptr,
    std::vector<executorch::aten::SizesType> sizes = {},
    std::vector<executorch::aten::DimOrderType> dim_order = {},
    std::vector<executorch::aten::StridesType> strides = {}) {
  // The deleter does no actual deletion; its sole purpose is to capture a
  // copy of the source TensorPtr so the shared storage stays alive for as
  // long as the view exists.
  auto keep_alive = [tensor_ptr](void*) {};
  return make_tensor_ptr(
      *tensor_ptr,
      std::move(sizes),
      std::move(dim_order),
      std::move(strides),
      std::move(keep_alive));
}

/**
Expand Down
68 changes: 68 additions & 0 deletions extension/tensor/test/tensor_ptr_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -790,6 +790,74 @@ TEST_F(TensorPtrTest, TensorUint8BufferTooLargeExpectDeath) {
ET_EXPECT_DEATH({ auto _ = make_tensor_ptr({2, 2}, std::move(data)); }, "");
}

// A view built from a TensorPtr must keep the source's storage alive:
// the source's deleter may only run once the view itself is destroyed.
TEST_F(TensorPtrTest, MakeViewFromTensorPtrKeepsSourceAlive) {
  bool deleter_ran = false;
  auto* data = new float[6]{1, 2, 3, 4, 5, 6};
  auto source = make_tensor_ptr(
      {2, 3},
      data,
      {},
      {},
      executorch::aten::ScalarType::Float,
      executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
      [&deleter_ran](void* ptr) {
        deleter_ran = true;
        delete[] static_cast<float*>(ptr);
      });
  auto view = make_tensor_ptr(source);
  // Dropping the source must not free the shared buffer while the view lives.
  source.reset();
  EXPECT_FALSE(deleter_ran);
  EXPECT_EQ(view->const_data_ptr<float>()[0], 1.0f);
  // The view aliases the same storage, so writes are visible through it.
  view->mutable_data_ptr<float>()[0] = 42.0f;
  EXPECT_EQ(view->const_data_ptr<float>()[0], 42.0f);
  // Releasing the last reference finally triggers the deleter.
  view.reset();
  EXPECT_TRUE(deleter_ran);
}

// Aliasing a raw Tensor reference (rather than a TensorPtr) must NOT extend
// the source's lifetime: the deleter runs as soon as the source TensorPtr is
// released, even while the view still exists.
TEST_F(TensorPtrTest, MakeViewFromTensorDoesNotKeepAliveByDefault) {
  bool deleter_ran = false;
  auto* data = new float[2]{7.0f, 8.0f};
  auto source = make_tensor_ptr(
      {2, 1},
      data,
      {},
      {},
      executorch::aten::ScalarType::Float,
      executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
      [&deleter_ran](void* ptr) {
        deleter_ran = true;
        delete[] static_cast<float*>(ptr);
      });
  auto view = make_tensor_ptr(*source);
  // The view aliases the exact same buffer as the source.
  EXPECT_EQ(view->const_data_ptr<float>(), data);
  // No keep-alive here: destroying the source frees the storage immediately.
  source.reset();
  EXPECT_TRUE(deleter_ran);
  view.reset();
}

// Passing an explicit deleter that captures the source TensorPtr extends the
// storage's lifetime to match the view's, mirroring the TensorPtr overload.
TEST_F(TensorPtrTest, MakeViewFromTensorWithDeleterKeepsAlive) {
  bool deleter_ran = false;
  auto* data = new float[3]{1.0f, 2.0f, 3.0f};
  auto source = make_tensor_ptr(
      {3},
      data,
      {},
      {},
      executorch::aten::ScalarType::Float,
      executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
      [&deleter_ran](void* ptr) {
        deleter_ran = true;
        delete[] static_cast<float*>(ptr);
      });
  // The no-op deleter captures `source` by value, keeping the buffer alive.
  auto view = make_tensor_ptr(*source, {}, {}, {}, [source](void*) {});
  source.reset();
  EXPECT_FALSE(deleter_ran);
  EXPECT_EQ(view->const_data_ptr<float>()[2], 3.0f);
  // Destroying the view drops the captured copy and frees the storage.
  view.reset();
  EXPECT_TRUE(deleter_ran);
}

TEST_F(TensorPtrTest, VectorFloatTooSmallExpectDeath) {
std::vector<float> data(9, 1.f);
ET_EXPECT_DEATH({ auto _ = make_tensor_ptr({2, 5}, std::move(data)); }, "");
Expand Down
Loading