Add cumulative sum tensor operation #1722
@@ -1,7 +1,7 @@
 // Language
 use alloc::vec::Vec;
 use core::ops::Range;
-use ndarray::IntoDimension;
+use ndarray::{Axis, IntoDimension};

 // Current crate
 use super::{matmul::matmul, NdArrayMathOps, NdArrayOps};
@@ -338,6 +338,17 @@ impl<E: FloatNdArrayElement> FloatTensorOps<Self> for NdArray<E> {
         NdArrayMathOps::sum_dim(tensor, dim)
     }

+    fn float_cumsum_dim<const D: usize>(
+        tensor: NdArrayTensor<E, D>,
+        dim: usize,
+    ) -> NdArrayTensor<E, D> {
+        let mut array = tensor.array.clone().into_owned();
Review comment: I believe the underlying array struct of
Review comment: Well
+
+        array.accumulate_axis_inplace(Axis(dim), |&prev, curr| *curr += prev);
+
+        NdArrayTensor::new(array.to_shared())
+    }
+
     fn float_argmax<const D: usize>(
         tensor: NdArrayTensor<E, D>,
         dim: usize,
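For readers unfamiliar with `accumulate_axis_inplace`, here is a minimal standalone sketch (not part of this PR) of how that ndarray call produces a cumulative sum along an axis; the example values are illustrative only.

```rust
use ndarray::{array, Axis};

fn main() {
    let mut a = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];

    // Each element becomes the sum of itself and every earlier element
    // along axis 0, i.e. a running sum down the rows.
    a.accumulate_axis_inplace(Axis(0), |&prev, curr| *curr += prev);

    assert_eq!(a, array![[1.0, 2.0, 3.0], [5.0, 7.0, 9.0]]);
}
```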
@@ -803,6 +803,22 @@ impl TensorCheck {
         check
     }

+    /// Checks running dimension such as cumulative sum
+    pub(crate) fn running_dim<const D: usize>(ops: &str, dim: usize) -> Self {
+        let mut check = Self::Ok;
+
+        if dim >= D {
+            check = check.register(
+                ops,
+                TensorError::new(format!(
+                    "Can't perform a running calculation on a tensor with ({D}) dimensions on axis ({dim})"
+                )),
+            );
+        }
+
+        check
+    }
Review comment on lines +806 to +821: You could use the existing
+
     pub(crate) fn sort_dim<const D: usize>(ops: &str, dim: usize) -> Self {
         let mut check = Self::Ok;
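As a side note on the bound enforced above: for a tensor with `D` dimensions the valid axes are `0..D-1`. Below is a free-standing sketch of that check; `check_running_dim` is a hypothetical helper for illustration, not code from this diff.

```rust
/// Illustrative only: for a tensor with `d` dimensions, the valid axes
/// are 0..d-1, so any `dim >= d` must be rejected.
fn check_running_dim(d: usize, dim: usize) -> Result<(), String> {
    if dim >= d {
        return Err(format!(
            "Can't perform a running calculation on a tensor with ({d}) dimensions on axis ({dim})"
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_running_dim(3, 2).is_ok()); // last valid axis of a 3-D tensor
    assert!(check_running_dim(3, 3).is_err()); // one past the end is rejected
}
```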
@@ -770,6 +770,18 @@ pub trait IntTensorOps<B: Backend> {
     /// The sum of all elements in the tensor along the dimension.
     fn int_sum_dim<const D: usize>(tensor: IntTensor<B, D>, dim: usize) -> IntTensor<B, D>;

+    /// Cumulative Sum of all elements in a tensor along a dimension.
Review comment: Let's keep the capitalization at "Cumulative sum"
+    ///
+    /// # Arguments
+    ///
+    /// * `tensor` - The tensor to perform cumulative sum on.
+    /// * `dim` - The dimension along which to perform cumulative sum.
+    ///
+    /// # Returns
+    ///
+    /// A tensor with the cumulative sum of all elements in `tensor` along `dim`.
+    fn int_cumsum_dim<const D: usize>(tensor: IntTensor<B, D>, dim: usize) -> IntTensor<B, D>;
+
     /// Computes the product of all elements in the tensor.
     ///
     /// # Arguments
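To make the documented semantics concrete, here is a small standalone sketch of a cumulative sum along the last dimension of a 2-D row-major buffer; `cumsum_dim1` is a hypothetical helper, not backend code.

```rust
/// Illustrative only: cumulative sum along dim 1 (the columns) of a
/// 2-D row-major buffer, mirroring the semantics documented above.
fn cumsum_dim1(data: &[i64], rows: usize, cols: usize) -> Vec<i64> {
    let mut out = data.to_vec();
    for r in 0..rows {
        for c in 1..cols {
            // Each element accumulates everything to its left in the row.
            out[r * cols + c] += out[r * cols + c - 1];
        }
    }
    out
}

fn main() {
    // Shape [2, 3]: rows [1, 2, 3] and [4, 5, 6].
    let x = [1, 2, 3, 4, 5, 6];
    assert_eq!(cumsum_dim1(&x, 2, 3), vec![1, 3, 6, 4, 9, 15]);
}
```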
@@ -842,6 +842,19 @@ pub trait FloatTensorOps<B: Backend> {
     /// A tensor with the sum of all elements in `tensor` along `dim`.
     fn float_sum_dim<const D: usize>(tensor: FloatTensor<B, D>, dim: usize) -> FloatTensor<B, D>;

+    /// Cumulative Sum of all elements in a tensor along a dimension.
Review comment: Same thing regarding capitalization
+    ///
+    /// # Arguments
+    ///
+    /// * `tensor` - The tensor to perform cumulative sum on.
+    /// * `dim` - The dimension along which to perform cumulative sum.
+    ///
+    /// # Returns
+    ///
+    /// A tensor with the cumulative sum of all elements in `tensor` along `dim`.
+    fn float_cumsum_dim<const D: usize>(tensor: FloatTensor<B, D>, dim: usize)
+        -> FloatTensor<B, D>;
+
     /// Product of all elements in a tensor.
     ///
     /// # Arguments
Review comment: See comment for `float_cumsum`
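One property worth keeping in mind (a natural unit test, though not one taken from this PR): the last slice of a cumulative sum along a dimension equals the plain sum along that dimension. A hedged ndarray sketch:

```rust
use ndarray::{array, Axis};

fn main() {
    let x = array![[1.0_f64, 2.0, 3.0], [4.0, 5.0, 6.0]];

    // Cumulative sum along axis 1 (columns), as in the ndarray backend above.
    let mut cum = x.clone();
    cum.accumulate_axis_inplace(Axis(1), |&prev, curr| *curr += prev);

    // The final column of the running sum matches the full sum along axis 1.
    let last = cum.index_axis(Axis(1), 2).to_owned();
    assert_eq!(last, x.sum_axis(Axis(1)));
}
```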