Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,7 @@ List of Contributors
* [Piljae Chae](https://github.com/IHateMint)
* [Oliver Kowalke](https://github.com/olk)
* [Connor Goggins](https://github.com/connorgoggins)
* [Joe Evans](https://github.com/josephevans)

Label Bot
---------
Expand Down
11 changes: 6 additions & 5 deletions src/operator/tensor/la_op-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,10 @@ using namespace mshadow;
// Copies lower/upper triangular part to upper/lower, i.e. to the opposite side.
struct CopyTriangularToOppositeSide {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, int matrix_size, int stride, DType* data, bool to_lower) {
MSHADOW_XINLINE static void Map(index_t i, size_t matrix_size, index_t stride,
DType* data, bool to_lower) {
// Below computation works even when we are dealing with a batch of matrices.
const int row((i % matrix_size) / stride), col(i % stride);
const index_t row((i % matrix_size) / stride), col(i % stride);
if (row > col) {
if (to_lower) {
data[i] = data[i + (col - row) * (stride - 1)];
Expand All @@ -52,9 +53,9 @@ struct CopyTriangularToOppositeSide {
// Zeroes out the lower/upper triangular part of a matrix.
struct ZeroTriangular {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, int matrix_size, int stride, DType* data,
bool zero_lower) {
const int row((i % matrix_size) / stride), col(i % stride);
MSHADOW_XINLINE static void Map(index_t i, size_t matrix_size, index_t stride,
DType* data, bool zero_lower) {
const index_t row((i % matrix_size) / stride), col(i % stride);
if ((!zero_lower && (row < col)) || (zero_lower && (row > col))) data[i] = 0;
}
};
Expand Down
27 changes: 27 additions & 0 deletions tests/nightly/test_large_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
LARGE_X = 100000000
SMALL_X = 100
SMALL_Y = 50
LARGE_SQ_X = 80000
LARGE_SIZE = LARGE_X * SMALL_Y
LARGE_TENSOR_SHAPE = 2**32
RNN_LARGE_TENSOR = 2**28
Expand Down Expand Up @@ -1167,6 +1168,32 @@ def check_correctness(mxnet_op, numpy_op, atol=1e-3):
check_gather()
check_binary_broadcast()

def test_linalg():
    def build_identity():
        # Build an identity matrix by setting each diagonal entry of a
        # zero matrix to 1.
        mat = nd.zeros((LARGE_SQ_X, LARGE_SQ_X))
        for idx in range(LARGE_SQ_X):
            mat[idx, idx] = 1
        return mat

    def verify_identity(result):
        # The result should still be an identity matrix: every diagonal
        # entry must equal 1.
        for idx in range(LARGE_SQ_X):
            assert result[idx, idx] == 1

    def check_potrf():
        # Cholesky factorization of the identity matrix is the identity.
        verify_identity(nd.linalg.potrf(build_identity()))

    def check_potri():
        # Inverse computed from the Cholesky factor of the identity
        # matrix is the identity.
        verify_identity(nd.linalg.potri(build_identity()))

    check_potrf()
    check_potri()


def test_basic():
def check_elementwise():
Expand Down