diff --git a/.github/workflows/pypi-linux-test.yaml b/.github/workflows/pypi-linux-test.yaml
deleted file mode 100644
index d930e64f..00000000
--- a/.github/workflows/pypi-linux-test.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: pypi-linux-test
-
-on:
- workflow_dispatch:
-
-env:
- PACKAGE_NAME: clarabel
- PYTHON_VERSION: "3.7" # to build abi3 wheels
- PYPI_TARGET: pypi # use "testpypi" for testing, "pypi" otherwise
-
-# NB: uncomment "if" lines below to skip compilation except on version tagging
-
-jobs:
-
- linux:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- target: [x86_64, i686, aarch64]
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
- with:
- python-version: ${{ env.PYTHON_VERSION }}
- architecture: x64
- - name: Build wheels
- uses: messense/maturin-action@v1
- with:
- target: ${{ matrix.target }}
- manylinux: auto
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel
- if: matrix.target == 'x86_64'
- run: |
- python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- python -c "import clarabel"
- python examples/python/example_qp.py
- - name: Upload wheels
- uses: actions/upload-artifact@v2
- with:
- name: wheels
- path: dist
-
diff --git a/.github/workflows/pypi-macos-test.yaml b/.github/workflows/pypi-macos-test.yaml
deleted file mode 100644
index 55848d88..00000000
--- a/.github/workflows/pypi-macos-test.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: pypi-macos-test
-
-on:
- workflow_dispatch:
-
-env:
- PACKAGE_NAME: clarabel
- PYTHON_VERSION: "3.7" # to build abi3 wheels
- PYPI_TARGET: pypi # use "testpypi" for testing, "pypi" otherwise
-
-# NB: uncomment "if" lines below to skip compilation except on version tagging
-
-jobs:
-
- macos:
- runs-on: macos-latest
- # if: "startsWith(github.ref, 'refs/tags/v')"
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
- with:
- python-version: ${{ env.PYTHON_VERSION }}
- architecture: x64
- - name: Install Rust toolchain
- uses: actions-rs/toolchain@v1
- with:
- toolchain: stable
- profile: minimal
- default: true
- - name: Build wheels - x86_64
- uses: messense/maturin-action@v1
- with:
- target: x86_64
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel - x86_64
- run: |
- pip install --upgrade pip
- pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- python -c "import clarabel"
- python examples/python/example_qp.py
- - name: Build wheels - universal2
- uses: messense/maturin-action@v1
- with:
- args: --release --universal2 --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel - universal2
- run: |
- pip install --upgrade pip
- pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
- python -c "import clarabel"
- python examples/python/example_qp.py
- - name: Upload wheels
- uses: actions/upload-artifact@v2
- with:
- name: wheels
- path: dist
diff --git a/.github/workflows/pypi-win-test.yaml b/.github/workflows/pypi-win-test.yaml
deleted file mode 100644
index c792733f..00000000
--- a/.github/workflows/pypi-win-test.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-name: pypi-win-test
-
-on:
- workflow_dispatch:
-
-env:
- PACKAGE_NAME: clarabel
- PYTHON_VERSION: "3.7" # to build abi3 wheels
- PYPI_TARGET: pypi # use "testpypi" for testing, "pypi" otherwise
-
-# NB: uncomment "if" lines below to skip compilation except on version tagging
-
-jobs:
-
- windows:
- runs-on: windows-latest
- # if: "startsWith(github.ref, 'refs/tags/v')"
- strategy:
- matrix:
- target: [x64, x86]
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
- with:
- python-version: ${{ env.PYTHON_VERSION }}
- architecture: ${{ matrix.target }}
- - name: Install Rust toolchain
- uses: actions-rs/toolchain@v1
- with:
- toolchain: stable
- profile: minimal
- default: true
- - name: Build wheels
- uses: messense/maturin-action@v1
- with:
- target: ${{ matrix.target }}
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel
- shell: bash
- run: |
- python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- python -c "import clarabel"
- python examples/python/example_qp.py
- - name: Upload wheels
- uses: actions/upload-artifact@v2
- with:
- name: wheels
- path: dist
-
-
- release:
- name: Release
- runs-on: ubuntu-latest
- # if: "startsWith(github.ref, 'refs/tags/v')"
- needs:
- - macos
- - windows
- - linux
- steps:
- - uses: actions/download-artifact@v2
- with:
- name: wheels
- - uses: actions/setup-python@v2
- with:
- python-version: ${{ env.PYTHON_VERSION }}
- - name: Publish to PyPi
- env:
- TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
- TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
- run: |
- pip install --upgrade twine
- twine upload --skip-existing --repository ${{ env.PYPI_TARGET }} *
diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml
index a79d6f49..e3d0271a 100644
--- a/.github/workflows/pypi.yaml
+++ b/.github/workflows/pypi.yaml
@@ -1,6 +1,7 @@
name: pypi
on:
+ workflow_dispatch:
pull_request:
branches:
- main
@@ -22,28 +23,32 @@ jobs:
linux:
runs-on: ubuntu-latest
strategy:
- matrix:
+ matrix:
target: [x86_64, i686, aarch64]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- architecture: x64
+
- name: Build wheels
- uses: messense/maturin-action@v1
+ uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
+ command: build
manylinux: auto
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel
+ args: -v --release --out dist -i python${{ env.PYTHON_VERSION }} --features "python"
+
+ - name: Install and test built wheel
if: matrix.target == 'x86_64'
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
python -c "import clarabel"
python examples/python/example_qp.py
+ python examples/python/example_sdp.py
+
- name: Upload wheels
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
@@ -58,33 +63,43 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
+
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
+
- name: Build wheels - x86_64
- uses: messense/maturin-action@v1
+ uses: PyO3/maturin-action@v1
with:
target: x86_64
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel - x86_64
+ command: build
+ args: --release -i python${{ env.PYTHON_VERSION }} --out dist --features "python"
+
+ - name: Install and test built wheel - x86_64
run: |
pip install --upgrade pip
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
python -c "import clarabel"
python examples/python/example_qp.py
+ python examples/python/example_sdp.py
+
- name: Build wheels - universal2
- uses: messense/maturin-action@v1
+ uses: PyO3/maturin-action@v1
with:
- args: --release --universal2 --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel - universal2
+ command: build
+ args: --release -i python${{ env.PYTHON_VERSION }} --universal2 --out dist --features "python"
+
+ - name: Install and test built wheel - universal2
run: |
pip install --upgrade pip
pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
python -c "import clarabel"
python examples/python/example_qp.py
+ python examples/python/example_sdp.py
+
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
@@ -103,23 +118,22 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.target }}
- - name: Install Rust toolchain
- uses: actions-rs/toolchain@v1
- with:
- toolchain: stable
- profile: minimal
- default: true
+
- name: Build wheels
- uses: messense/maturin-action@v1
+ uses: PyO3/maturin-action@v1
with:
+ command: build
target: ${{ matrix.target }}
- args: --release --out dist --features python -i python${{ env.PYTHON_VERSION }}
- - name: Install built wheel
+ args: --release --out dist -i python${{ env.PYTHON_VERSION }} --features "python"
+
+ - name: Install and test built wheel
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
python -c "import clarabel"
python examples/python/example_qp.py
+ python examples/python/example_sdp.py
+
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
@@ -136,7 +150,7 @@ jobs:
- windows
- linux
steps:
- - uses: actions/download-artifact@v2
+ - uses: actions/download-artifact@v3
with:
name: wheels
- uses: actions/setup-python@v2
diff --git a/.gitignore b/.gitignore
index d2ce101a..5548b98d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@ Cargo.lock
/env
notes*.txt
/dist
+__pycache__
+*.so*
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ae4ed57..32e389a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,23 @@
# Changelog
-Changes for the Julia version of Clarabel are documented in this file. For the Rust version, see [here](https://github.com/oxfordcontrol/clarabel.rs/CHANGELOG.md).
+Changes for the Rust version of Clarabel are documented in this file. For the Julia version, see [here](https://github.com/oxfordcontrol/Clarabel.jl/blob/main/CHANGELOG.md).
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
Version numbering in this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). We aim to keep the core solver functionality and minor releases in sync between the Rust/Python and Julia implementations. Small fixes that affect one implementation only may result in the patch release versions differing.
+## [0.5.0] - 2023-04-25
+### Changed
+
+This version ports support for PSD cones from the Julia version to Rust, with internal supporting modifications to both versions to keep implementations synchronized.
+
+### Rust specific changes
+
+- Package implements a variety of dense linear algebra functions using BLAS/LAPACK in support of the PSD cone implementation. Provides various build options for using different BLAS/LAPACK external libraries.
+
+- Python interface makes direct access to SciPy BLAS/LAPACK internal function pointers so that no external library linking is required for distribution via PyPI.
+
+
## [0.4.1] - 2023-08-03
### Changed
@@ -58,6 +70,7 @@ offline against the Julia-based benchmark problem suite, but this will not appea
- Ported all documentation to the common site [here](https://github.com/oxfordcontrol/ClarabelDocs)
+[0.5.0]: https://github.com/oxfordcontrol/Clarabel.rs/compare/v0.4.1...v0.5.0
[0.4.1]: https://github.com/oxfordcontrol/Clarabel.rs/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/oxfordcontrol/Clarabel.rs/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/oxfordcontrol/Clarabel.rs/compare/v0.2.0...v0.3.0
diff --git a/Cargo.toml b/Cargo.toml
index 9604fe20..d659224c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,22 +1,77 @@
[package]
name = "clarabel"
-version = "0.4.1"
+version = "0.5.0"
authors = ["Paul Goulart "]
edition = "2021"
+rust-version = "1.60"
license = "Apache-2.0"
description = "Clarabel Conic Interior Point Solver for Rust / Python"
readme = "README.md"
repository = "https://github.com/oxfordcontrol/Clarabel.rs"
-keywords = ["convex", "optimization", "QP", "LP", "SOCP"]
+keywords = ["convex", "optimization", "QP", "LP", "SOCP", "SDP", "conic"]
categories = ["mathematics"]
+# required for openssl to work on GitHub Actions
+resolver = "1"
+
[dependencies]
-lazy_static = "1.4"
-num-traits = "0.2"
+lazy_static = "1.4"
+num-traits = "0.2"
derive_builder = "0.11"
-enum_dispatch = "0.3.8"
-amd = "0.2.2"
-thiserror = "1"
+enum_dispatch = "0.3.8"
+amd = "0.2.2"
+thiserror = "1.0"
+cfg-if = "1.0"
+
+# -------------------------------
+# features
+# -------------------------------
+
+[features]
+
+# enables blas/lapack for SDP support, with blas/lapack src unspecified
+sdp = ["blas","lapack"]
+
+# explicit configuration options for different blas flavours
+sdp-accelerate = ["sdp", "blas-src/accelerate", "lapack-src/accelerate"]
+sdp-netlib = ["sdp", "blas-src/netlib", "lapack-src/netlib"]
+sdp-openblas = ["sdp", "blas-src/openblas", "lapack-src/openblas"]
+sdp-mkl = ["sdp", "blas-src/intel-mkl", "lapack-src/intel-mkl"]
+
+# build as the julia interface
+julia = ["sdp", "dep:libc", "dep:num-derive", "dep:serde", "dep:serde_json"]
+
+# build as the python interface via maturin.
+# NB: python builds use scipy shared libraries
+# for blas/lapack, and should *not* explicitly
+# enable a blas/lapack source package
+python = ["sdp", "dep:libc", "dep:pyo3", "dep:num-derive"]
+
+
+# -------------------------------
+# blas / lapack configuration
+# -------------------------------
+
+[dependencies.blas]
+version = "0.22.0"
+optional = true
+
+[dependencies.lapack]
+version = "0.19.0"
+optional = true
+
+[dependencies.blas-src]
+version = "0.8"
+optional = true
+
+[dependencies.lapack-src]
+version = "0.8"
+optional = true
+
+
+# -------------------------------
+# examples
+# -------------------------------
[[example]]
name = "lp"
@@ -42,6 +97,11 @@ path = "examples/rust/example_expcone.rs"
name = "box"
path = "examples/rust/example_box.rs"
+[[example]]
+name = "sdp"
+path = "examples/rust/example_sdp.rs"
+required-features = ["sdp"]
+
# -------------------------------
@@ -56,15 +116,6 @@ debug = true
# Optional julia and python interface
# ------------------------------
-[features]
-
-# enables julia interface
-julia = ["dep:libc","dep:num-derive","dep:serde", "dep:serde_json"]
-
-# enables python interface via pyo3.
-# should only be used with maturin builds.
-python = ["dep:pyo3","dep:num-derive"]
-
[dependencies.num-derive]
optional = true
version = "0.2"
@@ -102,4 +153,6 @@ crate-type = ["lib","cdylib"]
# ------------------------------
[package.metadata.docs.rs]
-rustdoc-args = [ "--html-in-header", "./html/rustdocs-header.html" ]
\ No newline at end of file
+rustdoc-args = [ "--html-in-header", "./html/rustdocs-header.html" ]
+
+
diff --git a/README.md b/README.md
index 36f1188f..e421d718 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Interior Point Conic Optimization for Rust and Python
-
+
diff --git a/examples/python/example_sdp.py b/examples/python/example_sdp.py
new file mode 100644
index 00000000..d98a11a1
--- /dev/null
+++ b/examples/python/example_sdp.py
@@ -0,0 +1,20 @@
+import clarabel
+import numpy as np
+from scipy import sparse
+
+# Define problem data
+P = sparse.eye(6).tocsc()
+A = sparse.eye(6).tocsc()
+
+c = np.zeros(6)
+b = np.array([-3., 1., 4., 1., 2., 5.])
+
+cones = [clarabel.PSDTriangleConeT(3)]
+settings = clarabel.DefaultSettings()
+
+solver = clarabel.DefaultSolver(P,c,A,b,cones,settings)
+solution = solver.solve()
+print(
+ f"Solver terminated with status: {solution.status}, objective {solution.obj_val},\n"
+ f"and solution: {dict(s=solution.s, x=solution.x, z=solution.z)}"
+)
diff --git a/examples/rust/example_sdp.rs b/examples/rust/example_sdp.rs
new file mode 100644
index 00000000..059c07d2
--- /dev/null
+++ b/examples/rust/example_sdp.rs
@@ -0,0 +1,26 @@
+#![allow(non_snake_case)]
+
+fn main() {
+ use clarabel::algebra::*;
+ use clarabel::solver::*;
+
+ // SDP Example
+
+ let P = CscMatrix::identity(6);
+
+ // A = I (6x6 identity), matching P
+ let A = CscMatrix::identity(6);
+
+ let c = vec![0.0; 6];
+ let b = vec![-3., 1., 4., 1., 2., 5.];
+
+ let cones = vec![PSDTriangleConeT(3)];
+
+ let settings = DefaultSettings::default();
+
+ let mut solver = DefaultSolver::new(&P, &c, &A, &b, &cones, settings);
+
+ solver.solve();
+
+ println!("Solution = {:?}", solver.solution.x);
+}
diff --git a/examples/rust/example_socp.rs b/examples/rust/example_socp.rs
index 632aafcb..47b6c21f 100644
--- a/examples/rust/example_socp.rs
+++ b/examples/rust/example_socp.rs
@@ -11,24 +11,24 @@ fn main() {
let P = CscMatrix::new(
2, // m
2, // n
- vec![0, 1, 2], // colptr
- vec![0, 1], // rowval
- vec![6., 4.], // nzval
+ vec![0, 0, 1], // colptr
+ vec![1], // rowval
+ vec![2.], // nzval
);
- let q = vec![-1., -4.];
+ let q = vec![0., 0.];
let A = CscMatrix::new(
- 5, // m
- 2, // n
- vec![0, 3, 6], // colptr
- vec![0, 1, 3, 0, 2, 4], // rowval
- vec![1., 1., -1., -2., 1., -1.], // nzval
+ 3, // m
+ 2, // n
+ vec![0, 1, 2], // colptr
+ vec![1, 2], // rowval
+ vec![-2., -1.], // nzval
);
- let b = vec![0., 1., 1., 1., 1.];
+ let b = vec![1., -2., -2.];
- let cones = [ZeroConeT(1), NonnegativeConeT(4)];
+ let cones = [SecondOrderConeT(3)];
let settings = DefaultSettings::default();
diff --git a/pyproject.toml b/pyproject.toml
index 5997309f..366bb34f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,3 +13,5 @@ classifiers = [
"Programming Language :: Rust",
]
+[tool.maturin]
+python-source = "python"
\ No newline at end of file
diff --git a/python/clarabel/__init__.py b/python/clarabel/__init__.py
new file mode 100644
index 00000000..c40a6679
--- /dev/null
+++ b/python/clarabel/__init__.py
@@ -0,0 +1,8 @@
+from .clarabel import *
+
+__doc__ = clarabel.__doc__
+if hasattr(clarabel, "__all__"):
+ __all__ = clarabel.__all__
+
+clarabel.force_load_blas_lapack()
+
diff --git a/src/algebra/adjoint.rs b/src/algebra/adjoint.rs
new file mode 100644
index 00000000..ce2b1f6d
--- /dev/null
+++ b/src/algebra/adjoint.rs
@@ -0,0 +1,17 @@
+/// Adjoint of a matrix
+use crate::algebra::{Adjoint, MatrixShape, ShapedMatrix};
+
+impl<'a, M> ShapedMatrix for Adjoint<'a, M>
+where
+ M: ShapedMatrix,
+{
+ fn nrows(&self) -> usize {
+ self.src.ncols()
+ }
+ fn ncols(&self) -> usize {
+ self.src.nrows()
+ }
+ fn shape(&self) -> MatrixShape {
+ MatrixShape::T
+ }
+}
diff --git a/src/algebra/csc/block_concatenate.rs b/src/algebra/csc/block_concatenate.rs
new file mode 100644
index 00000000..b00b023f
--- /dev/null
+++ b/src/algebra/csc/block_concatenate.rs
@@ -0,0 +1,61 @@
+use crate::algebra::{BlockConcatenate, CscMatrix, FloatT, MatrixShape};
+
+impl BlockConcatenate for CscMatrix
+where
+ T: FloatT,
+{
+ fn hcat(A: &Self, B: &Self) -> Self {
+ //first check for compatible row dimensions
+ assert_eq!(A.m, B.m);
+
+ //dimensions for C = [A B];
+ let nnz = A.nnz() + B.nnz();
+ let m = A.m; //rows C
+ let n = A.n + B.n; //cols C
+ let mut C = CscMatrix::spalloc(m, n, nnz);
+
+ //we make dummy mapping indices since we don't care
+ //where the entries go. An alternative would be to
+ //modify the fill_block method to accept Option<_>
+ let mut amap = vec![0usize; A.nnz()];
+ let mut bmap = vec![0usize; B.nnz()];
+
+ //compute column counts and fill
+ C.colcount_block(A, 0, MatrixShape::N);
+ C.colcount_block(B, A.n, MatrixShape::N);
+ C.colcount_to_colptr();
+
+ C.fill_block(A, &mut amap, 0, 0, MatrixShape::N);
+ C.fill_block(B, &mut bmap, 0, A.n, MatrixShape::N);
+ C.backshift_colptrs();
+
+ C
+ }
+
+ fn vcat(A: &Self, B: &Self) -> Self {
+ //first check for compatible column dimensions
+ assert_eq!(A.n, B.n);
+
+ //dimensions for C = [A; B];
+ let nnz = A.nnz() + B.nnz();
+ let m = A.m + B.m; //rows C
+ let n = A.n; //cols C
+ let mut C = CscMatrix::spalloc(m, n, nnz);
+
+ //we make dummy mapping indices since we don't care
+ //where the entries go. An alternative would be to
+ //modify the fill_block method to accept Option<_>
+ let mut amap = vec![0usize; A.nnz()];
+ let mut bmap = vec![0usize; B.nnz()];
+
+ //compute column counts and fill
+ C.colcount_block(A, 0, MatrixShape::N);
+ C.colcount_block(B, 0, MatrixShape::N);
+ C.colcount_to_colptr();
+
+ C.fill_block(A, &mut amap, 0, 0, MatrixShape::N);
+ C.fill_block(B, &mut bmap, A.m, 0, MatrixShape::N);
+ C.backshift_colptrs();
+ C
+ }
+}
diff --git a/src/algebra/csc/core.rs b/src/algebra/csc/core.rs
new file mode 100644
index 00000000..30c2d7c5
--- /dev/null
+++ b/src/algebra/csc/core.rs
@@ -0,0 +1,285 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{Adjoint, FloatT, MatrixShape, ShapedMatrix, SparseFormatError, Symmetric};
+use std::iter::zip;
+
+/// Sparse matrix in standard Compressed Sparse Column (CSC) format
+///
+/// __Example usage__ : To construct the 3 x 3 matrix
+/// ```text
+/// A = [1. 3. 5.]
+/// [2. 0. 6.]
+/// [0. 4. 7.]
+/// ```
+///
+/// ```no_run
+/// use clarabel::algebra::CscMatrix;
+///
+/// let A : CscMatrix = CscMatrix::new(
+/// 3, // m
+/// 3, // n
+/// vec![0, 2, 4, 7], //colptr
+/// vec![0, 1, 0, 2, 0, 1, 2], //rowval
+/// vec![1., 2., 3., 4., 5., 6., 7.], //nzval
+/// );
+///
+/// // optional correctness check
+/// assert!(A.check_format().is_ok());
+///
+/// ```
+///
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct CscMatrix {
+ /// number of rows
+ pub m: usize,
+ /// number of columns
+ pub n: usize,
+ /// CSC format column pointer.
+ ///
+ /// This field should have length `n+1`. The last entry corresponds
+ /// to the number of nonzeros and should agree with the lengths
+ /// of the `rowval` and `nzval` fields.
+ pub colptr: Vec,
+ /// vector of row indices
+ pub rowval: Vec,
+ /// vector of non-zero matrix elements
+ pub nzval: Vec,
+}
+
+impl CscMatrix
+where
+ T: FloatT,
+{
+ /// `CscMatrix` constructor.
+ ///
+ /// # Panics
+ /// Makes rudimentary dimensional compatibility checks and panics on
+ /// failure. This constructor does __not__
+ /// ensure that rows indices are all in bounds or that data is arranged
+ /// such that entries within each column appear in order of increasing
+ /// row index. Responsibility for ensuring these conditions hold
+ /// is left to the caller.
+ ///
+
+ pub fn new(m: usize, n: usize, colptr: Vec, rowval: Vec, nzval: Vec) -> Self {
+ assert_eq!(rowval.len(), nzval.len());
+ assert_eq!(colptr.len(), n + 1);
+ assert_eq!(colptr[n], rowval.len());
+ CscMatrix {
+ m,
+ n,
+ colptr,
+ rowval,
+ nzval,
+ }
+ }
+
+ /// allocate space for a sparse matrix with `nnz` elements
+ ///
+ /// To make an m x n matrix of zeros, use
+ /// ```no_run
+ /// use clarabel::algebra::CscMatrix;
+ /// let m = 3;
+ /// let n = 4;
+ /// let A : CscMatrix = CscMatrix::spalloc(m,n,0);
+ /// ```
+
+ pub fn spalloc(m: usize, n: usize, nnz: usize) -> Self {
+ let mut colptr = vec![0; n + 1];
+ let rowval = vec![0; nnz];
+ let nzval = vec![T::zero(); nnz];
+ colptr[n] = nnz;
+
+ CscMatrix::new(m, n, colptr, rowval, nzval)
+ }
+
+ /// Identity matrix of size `n`
+ pub fn identity(n: usize) -> Self {
+ let colptr = (0usize..=n).collect();
+ let rowval = (0usize..n).collect();
+ let nzval = vec![T::one(); n];
+
+ CscMatrix::new(n, n, colptr, rowval, nzval)
+ }
+
+ /// number of nonzeros
+ pub fn nnz(&self) -> usize {
+ self.colptr[self.n]
+ }
+
+ /// transpose
+ pub fn t(&self) -> Adjoint<'_, Self> {
+ Adjoint { src: self }
+ }
+
+ /// symmetric view
+ pub fn sym(&self) -> Symmetric<'_, Self> {
+ debug_assert!(self.is_triu());
+ Symmetric { src: self }
+ }
+
+ /// Check that matrix data is correctly formatted.
+ pub fn check_format(&self) -> Result<(), SparseFormatError> {
+ if self.rowval.len() != self.nzval.len() {
+ return Err(SparseFormatError::IncompatibleDimension);
+ }
+
+ if self.colptr.is_empty()
+ || (self.colptr.len() - 1) != self.n
+ || self.colptr[self.n] != self.rowval.len()
+ {
+ return Err(SparseFormatError::IncompatibleDimension);
+ }
+
+ //check for colptr monotonicity
+ if self.colptr.windows(2).any(|c| c[0] > c[1]) {
+ return Err(SparseFormatError::BadColptr);
+ }
+
+ //check for rowval monotonicity within each column
+ for col in 0..self.n {
+ let rng = self.colptr[col]..self.colptr[col + 1];
+ if self.rowval[rng].windows(2).any(|c| c[0] >= c[1]) {
+ return Err(SparseFormatError::BadRowval);
+ }
+ }
+ //check for row values out of bounds
+ if !self.rowval.iter().all(|r| r < &self.m) {
+ return Err(SparseFormatError::BadRowval);
+ }
+
+ Ok(())
+ }
+
+ /// Select a subset of the rows of a sparse matrix
+ ///
+ /// # Panics
+ /// Panics if row dimensions are incompatible
+
+ pub fn select_rows(&self, rowidx: &Vec) -> Self {
+ //first check for compatible row dimensions
+ assert_eq!(rowidx.len(), self.m);
+
+ //count the number of rows in the reduced matrix and build an
+ //index from the logical rowidx to the reduced row number
+ let mut rridx = vec![0; self.m];
+ let mut mred = 0;
+ for (r, is_used) in zip(&mut rridx, rowidx) {
+ if *is_used {
+ *r = mred;
+ mred += 1;
+ }
+ }
+
+ // count the nonzeros in Ared
+ let nzred = self.rowval.iter().filter(|&r| rowidx[*r]).count();
+
+ // Allocate a reduced size A
+ let mut Ared = CscMatrix::spalloc(mred, self.n, nzred);
+
+ //populate new matrix
+ let mut ptrred = 0;
+ for col in 0..self.n {
+ Ared.colptr[col] = ptrred;
+ for ptr in self.colptr[col]..self.colptr[col + 1] {
+ let thisrow = self.rowval[ptr];
+ if rowidx[thisrow] {
+ Ared.rowval[ptrred] = rridx[thisrow];
+ Ared.nzval[ptrred] = self.nzval[ptr];
+ ptrred += 1;
+ }
+ }
+ Ared.colptr[Ared.n] = ptrred;
+ }
+
+ Ared
+ }
+
+ /// Allocates a new matrix containing only entries from the upper triangular part
+
+ pub fn to_triu(&self) -> Self {
+ assert_eq!(self.m, self.n);
+ let (m, n) = (self.m, self.n);
+ let mut colptr = vec![0; n + 1];
+ let mut nnz = 0;
+
+ //count the number of entries in the upper triangle
+ //and place the totals into colptr
+
+ for col in 0..n {
+ //start / stop indices for the current column
+ let first = self.colptr[col];
+ let last = self.colptr[col + 1];
+ let rows = &self.rowval[first..last];
+
+ // number of entries on or above diagonal in this column,
+ // shifted by 1 (i.e. colptr keeps a 0 in the first column)
+ colptr[col + 1] = rows.iter().filter(|&row| *row <= col).count();
+ nnz += colptr[col + 1];
+ }
+
+ //allocate and copy the upper triangle entries of
+ //each column into the new value vector.
+ //NB! : assumes that entries in each column have
+ //monotonically increasing row numbers
+ let mut rowval = vec![0; nnz];
+ let mut nzval = vec![T::zero(); nnz];
+
+ for col in 0..n {
+ let ntriu = colptr[col + 1];
+
+ //start / stop indices for the destination
+ let fdest = colptr[col];
+ let ldest = fdest + ntriu;
+
+ //start / stop indices for the source
+ let fsrc = self.colptr[col];
+ let lsrc = fsrc + ntriu;
+
+ //copy upper triangle values
+ rowval[fdest..ldest].copy_from_slice(&self.rowval[fsrc..lsrc]);
+ nzval[fdest..ldest].copy_from_slice(&self.nzval[fsrc..lsrc]);
+
+ //this should now be cumsum of the counts
+ colptr[col + 1] = ldest;
+ }
+ CscMatrix::new(m, n, colptr, rowval, nzval)
+ }
+
+ pub fn is_triu(&self) -> bool {
+ // check lower triangle for any structural entries, regardless
+ // of the values that may be assigned to them
+ for col in 0..self.ncols() {
+ //start / stop indices for the current column
+ let first = self.colptr[col];
+ let last = self.colptr[col + 1];
+ let rows = &self.rowval[first..last];
+
+ // any structural entry strictly below the diagonal
+ // means the matrix is not upper triangular
+ if rows.iter().any(|&row| row > col) {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+impl ShapedMatrix for CscMatrix {
+ fn nrows(&self) -> usize {
+ self.m
+ }
+ fn ncols(&self) -> usize {
+ self.n
+ }
+ fn size(&self) -> (usize, usize) {
+ (self.m, self.n)
+ }
+ fn shape(&self) -> MatrixShape {
+ MatrixShape::N
+ }
+ fn is_square(&self) -> bool {
+ self.m == self.n
+ }
+}
diff --git a/src/algebra/native/mod.rs b/src/algebra/csc/matrix_math.rs
similarity index 54%
rename from src/algebra/native/mod.rs
rename to src/algebra/csc/matrix_math.rs
index 25ca7346..03ad5d19 100644
--- a/src/algebra/native/mod.rs
+++ b/src/algebra/csc/matrix_math.rs
@@ -1,210 +1,33 @@
-use super::*;
-
-impl ScalarMath for T {
- fn clip(&self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> T {
- if *self < min_thresh {
- min_new
- } else if *self > max_thresh {
- max_new
- } else {
- *self
- }
- }
-
- fn logsafe(&self) -> T {
- if *self <= T::zero() {
- -T::infinity()
- } else {
- self.ln()
- }
- }
-}
-
-impl VectorMath for [T] {
- fn copy_from(&mut self, src: &[T]) -> &mut Self {
- self.copy_from_slice(src);
- self
- }
-
- fn select(&self, index: &[bool]) -> Vec {
- assert_eq!(self.len(), index.len());
- self.iter()
- .zip(index)
- .filter(|(_x, &b)| b)
- .map(|(&x, _b)| x)
- .collect()
- }
+use crate::algebra::*;
+use std::iter::zip;
- fn scalarop(&mut self, op: impl Fn(T) -> T) -> &mut Self {
- for x in &mut *self {
- *x = op(*x);
- }
- self
- }
-
- fn scalarop_from(&mut self, op: impl Fn(T) -> T, v: &[T]) -> &mut Self {
- for (x, v) in self.iter_mut().zip(v) {
- *x = op(*v);
- }
- self
- }
-
- fn translate(&mut self, c: T) -> &mut Self {
- //NB: translate is a scalar shift of all variables and is only
- //used only in the NN cone to force vectors into R^n_+
- self.scalarop(|x| x + c)
- }
-
- fn set(&mut self, c: T) -> &mut Self {
- self.scalarop(|_x| c)
- }
+impl MatrixVectorMultiply for CscMatrix {
+ type T = T;
- fn scale(&mut self, c: T) -> &mut Self {
- self.scalarop(|x| x * c)
- }
-
- fn recip(&mut self) -> &mut Self {
- self.scalarop(T::recip)
- }
-
- fn sqrt(&mut self) -> &mut Self {
- self.scalarop(T::sqrt)
- }
-
- fn rsqrt(&mut self) -> &mut Self {
- self.scalarop(|x| T::recip(T::sqrt(x)))
- }
-
- fn negate(&mut self) -> &mut Self {
- self.scalarop(|x| -x)
- }
-
- fn hadamard(&mut self, y: &[T]) -> &mut Self {
- self.iter_mut().zip(y).for_each(|(x, y)| *x *= *y);
- self
- }
-
- fn clip(&mut self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> &mut Self {
- self.scalarop(|x| x.clip(min_thresh, max_thresh, min_new, max_new))
- }
-
- fn normalize(&mut self) -> T {
- let norm = self.norm();
- if norm == T::zero() {
- return T::zero();
- }
- self.scale(norm.recip());
- norm
- }
-
- fn dot(&self, y: &[T]) -> T {
- self.iter()
- .zip(y)
- .fold(T::zero(), |acc, (&x, &y)| acc + x * y)
- }
-
- fn dot_shifted(z: &[T], s: &[T], dz: &[T], ds: &[T], α: T) -> T {
- assert_eq!(z.len(), s.len());
- assert_eq!(z.len(), dz.len());
- assert_eq!(s.len(), ds.len());
-
- let s_ds = s.iter().zip(ds.iter());
- let z_dz = z.iter().zip(dz.iter());
- let mut out = T::zero();
- for ((&s, &ds), (&z, &dz)) in s_ds.zip(z_dz) {
- let si = s + α * ds;
- let zi = z + α * dz;
- out += si * zi;
- }
- out
- }
-
- fn dist(&self, y: &Self) -> T {
- let dist2 = self
- .iter()
- .zip(y)
- .fold(T::zero(), |acc, (&x, &y)| acc + T::powi(x - y, 2));
- T::sqrt(dist2)
- }
-
- fn sumsq(&self) -> T {
- self.dot(self)
- }
-
- // 2-norm
- fn norm(&self) -> T {
- T::sqrt(self.sumsq())
- }
-
- //scaled norm of elementwise produce self.*v
- fn norm_scaled(&self, v: &[T]) -> T {
- assert_eq!(self.len(), v.len());
- let total = self.iter().zip(v).fold(T::zero(), |acc, (&x, &y)| {
- let prod = x * y;
- acc + prod * prod
- });
- T::sqrt(total)
- }
-
- // Returns infinity norm, ignoring NaNs
- fn norm_inf(&self) -> T {
- let mut out = T::zero();
- for v in self.iter().map(|v| v.abs()) {
- out = if v > out { v } else { out };
- }
- out
- }
-
- // Returns one norm
- fn norm_one(&self) -> T {
- self.iter().fold(T::zero(), |acc, v| acc + v.abs())
- }
-
- fn minimum(&self) -> T {
- self.iter().fold(T::infinity(), |r, &s| T::min(r, s))
- }
-
- fn maximum(&self) -> T {
- self.iter().fold(-T::infinity(), |r, &s| T::max(r, s))
- }
-
- fn mean(&self) -> T {
- let mean = if self.is_empty() {
- T::zero()
- } else {
- let num = self.iter().fold(T::zero(), |r, &s| r + s);
- let den = T::from_usize(self.len()).unwrap();
- num / den
- };
- mean
- }
-
- fn is_finite(&self) -> bool {
- self.iter().all(|&x| T::is_finite(x))
+ fn gemv(&self, y: &mut [T], x: &[T], a: T, b: T) {
+ _csc_axpby_N(self, y, x, a, b);
}
+}
- fn axpby(&mut self, a: T, x: &[T], b: T) -> &mut Self {
- assert_eq!(self.len(), x.len());
+impl MatrixVectorMultiply for Adjoint<'_, CscMatrix> {
+ type T = T;
- let yx = self.iter_mut().zip(x);
- yx.for_each(|(y, x)| *y = a * (*x) + b * (*y));
- self
+ fn gemv(&self, y: &mut [T], x: &[T], a: T, b: T) {
+ _csc_axpby_T(self.src, y, x, a, b);
}
+}
- fn waxpby(&mut self, a: T, x: &[T], b: T, y: &[T]) -> &mut Self {
- assert_eq!(self.len(), x.len());
- assert_eq!(self.len(), y.len());
-
- let xy = x.iter().zip(y);
+impl SymMatrixVectorMultiply for Symmetric<'_, CscMatrix> {
+ type T = T;
- for (w, (x, y)) in self.iter_mut().zip(xy) {
- *w = a * (*x) + b * (*y);
- }
- self
+ fn symv(&self, y: &mut [T], x: &[T], a: T, b: T) {
+ _csc_symv_unsafe(self.src, y, x, a, b);
}
}
-impl MatrixMath for CscMatrix {
+impl MatrixMath for CscMatrix {
+ type T = T;
+
//scalar mut operations
fn scale(&mut self, c: T) {
self.nzval.scale(c);
@@ -222,12 +45,6 @@ impl MatrixMath for CscMatrix {
fn col_norms_no_reset(&self, norms: &mut [T]) {
assert_eq!(norms.len(), self.colptr.len() - 1);
- // for (i,v) in norms.iter_mut().enumerate(){
- // for j in self.colptr[i]..self.colptr[i + 1]{
- // let tmp = T::abs(self.nzval[j]);
- // *v = T::max(*v,tmp);
- // }
- // }
for (i, v) in norms.iter_mut().enumerate() {
*v = self
.nzval
@@ -264,16 +81,13 @@ impl MatrixMath for CscMatrix {
fn row_norms_no_reset(&self, norms: &mut [T]) {
assert_eq!(self.rowval.len(), *self.colptr.last().unwrap());
- for (row, val) in self.rowval.iter().zip(self.nzval.iter()) {
+ for (row, val) in zip(&self.rowval, &self.nzval) {
norms[*row] = T::max(norms[*row], T::abs(*val));
}
}
fn lscale(&mut self, l: &[T]) {
- let rows = &self.rowval;
- let vals = &mut self.nzval;
-
- for (val, row) in vals.iter_mut().zip(rows) {
+ for (val, row) in zip(&mut self.nzval, &self.rowval) {
*val *= l[*row];
}
}
@@ -296,30 +110,12 @@ impl MatrixMath for CscMatrix {
let vals = &mut self.nzval[first..last];
let rows = &self.rowval[first..last];
- for (val, row) in vals.iter_mut().zip(rows) {
+ for (val, row) in zip(vals, rows) {
*val *= l[*row] * ri;
}
}
}
- fn gemv(&self, y: &mut [T], trans: MatrixShape, x: &[T], a: T, b: T) {
- match trans {
- MatrixShape::N => _csc_axpby_N(self, y, x, a, b),
- MatrixShape::T => _csc_axpby_T(self, y, x, a, b),
- }
- }
-
- fn symv(&self, y: &mut [T], tri: MatrixTriangle, x: &[T], a: T, b: T) {
- //NB: the triangle argument doesn't actually do
- //anything here, and the call is the same either
- //way. The argument serves only as a reminder that
- //the caller should only pass a triangular form
- match tri {
- MatrixTriangle::Triu => _csc_symv_unsafe(self, y, x, a, b),
- MatrixTriangle::Tril => _csc_symv_unsafe(self, y, x, a, b),
- }
- }
-
fn quad_form(&self, y: &[T], x: &[T]) -> T {
_csc_quad_form(self, y, x)
}
@@ -339,7 +135,7 @@ fn _csc_symv_safe(A: &CscMatrix, y: &mut [T], x: &[T], a: T, b: T)
let rows = &A.rowval[first..last];
let nzvals = &A.nzval[first..last];
- for (&row, &Aij) in rows.iter().zip(nzvals.iter()) {
+ for (&row, &Aij) in zip(rows, nzvals) {
y[row] += a * Aij * xcol;
if row != col {
@@ -411,9 +207,8 @@ fn _csc_quad_form(M: &CscMatrix, y: &[T], x: &[T]) -> T {
let values = &M.nzval[first..last];
let rows = &M.rowval[first..last];
- let iter = values.iter().zip(rows.iter());
- for (&Mv, &row) in iter {
+ for (&Mv, &row) in zip(values, rows) {
if row < col {
//triu terms only
tmp1 += Mv * x[row];
diff --git a/src/algebra/csc/mod.rs b/src/algebra/csc/mod.rs
new file mode 100644
index 00000000..365377d0
--- /dev/null
+++ b/src/algebra/csc/mod.rs
@@ -0,0 +1,10 @@
+#![allow(non_snake_case)]
+
+mod core;
+pub use self::core::*;
+mod utils;
+pub use utils::*;
+mod matrix_math;
+pub use matrix_math::*;
+mod block_concatenate;
+pub use block_concatenate::*;
diff --git a/src/algebra/csc/utils.rs b/src/algebra/csc/utils.rs
new file mode 100644
index 00000000..8fb89e44
--- /dev/null
+++ b/src/algebra/csc/utils.rs
@@ -0,0 +1,278 @@
+//---------------------------------------------------------
+// low-level internal utilities for counting / filling entries
+// in block partitioned sparse matrices.
+//---------------------------------------------------------
+
+use crate::algebra::{CscMatrix, FloatT, MatrixShape, MatrixTriangle};
+use std::iter::zip;
+
+impl CscMatrix
+where
+ T: FloatT,
+{
+ // increment self.colptr by the number of nonzeros
+ // in a dense upper/lower triangle on the diagonal.
+ pub(crate) fn colcount_dense_triangle(
+ &mut self,
+ initcol: usize,
+ blockcols: usize,
+ shape: MatrixTriangle,
+ ) {
+ let cols = self.colptr[(initcol)..(initcol + blockcols)].iter_mut();
+ let counts = 1..(blockcols + 1);
+ match shape {
+ MatrixTriangle::Triu => {
+ zip(cols, counts).for_each(|(x, c)| *x += c);
+ }
+
+ MatrixTriangle::Tril => {
+ zip(cols, counts.rev()).for_each(|(x, c)| *x += c);
+ }
+ }
+ }
+
+ // increment the self.colptr by the number of nonzeros
+ // in a square diagonal matrix placed on the diagonal.
+ pub(crate) fn colcount_diag(&mut self, initcol: usize, blockcols: usize) {
+ let cols = self.colptr[initcol..(initcol + blockcols)].iter_mut();
+ cols.for_each(|x| *x += 1);
+ }
+
+ // same as kkt_count_diag, but counts places
+ // where the input matrix M has a missing
+ // diagonal entry. M must be square and TRIU
+ pub(crate) fn colcount_missing_diag(&mut self, M: &CscMatrix, initcol: usize) {
+ assert_eq!(M.colptr.len(), M.n + 1);
+ assert!(self.colptr.len() >= M.n + initcol);
+
+ for i in 0..M.n {
+ if M.colptr[i] == M.colptr[i+1] || // completely empty column
+ M.rowval[M.colptr[i+1]-1] != i
+ // last element is not on diagonal
+ {
+ self.colptr[i + initcol] += 1;
+ }
+ }
+ }
+
+ // increment the self.colptr by a number of nonzeros.
+ // used to account for the placement of a column
+ // vector that partially populates the column
+ pub(crate) fn colcount_colvec(&mut self, n: usize, _firstrow: usize, firstcol: usize) {
+ // just add the vector length to this column
+ self.colptr[firstcol] += n;
+ }
+
+ // increment the self.colptr by 1 for every element
+ // used to account for the placement of a column
+ // vector that partially populates the column
+ pub(crate) fn colcount_rowvec(&mut self, n: usize, _firstrow: usize, firstcol: usize) {
+ // add one element to each of n consecutive columns
+ // starting from initcol. The row index doesn't
+ // matter here.
+ let cols = self.colptr[firstcol..(firstcol + n)].iter_mut();
+ cols.for_each(|x| *x += 1);
+ }
+
+ // increment the self.colptr by the number of nonzeros in M
+
+ pub(crate) fn colcount_block(&mut self, M: &CscMatrix, initcol: usize, shape: MatrixShape) {
+ match shape {
+ MatrixShape::T => {
+ for row in M.rowval.iter() {
+ self.colptr[initcol + row] += 1;
+ }
+ }
+ MatrixShape::N => {
+ // just add the column count
+ for i in 0..M.n {
+ self.colptr[initcol + i] += M.colptr[i + 1] - M.colptr[i];
+ }
+ }
+ }
+ }
+
+ //populate a partial column with zeros using the self.colptr as the indicator
+ // of the next fill location in each row.
+ pub(crate) fn fill_colvec(&mut self, vtoKKT: &mut [usize], initrow: usize, initcol: usize) {
+ for (i, v) in vtoKKT.iter_mut().enumerate() {
+ let dest = self.colptr[initcol];
+ self.rowval[dest] = initrow + i;
+ self.nzval[dest] = T::zero();
+ *v = dest;
+ self.colptr[initcol] += 1;
+ }
+ }
+
+ // populate a partial row with zeros using the self.colptr as indicator of
+ // next fill location in each row.
+ pub(crate) fn fill_rowvec(&mut self, vtoKKT: &mut [usize], initrow: usize, initcol: usize) {
+ for (i, v) in vtoKKT.iter_mut().enumerate() {
+ let col = initcol + i;
+ let dest = self.colptr[col];
+ self.rowval[dest] = initrow;
+ self.nzval[dest] = T::zero();
+ *v = dest;
+ self.colptr[col] += 1;
+ }
+ }
+
+ // populate values from M using the self.colptr as indicator of
+ // next fill location in each row.
+ pub(crate) fn fill_block(
+ &mut self,
+ M: &CscMatrix,
+ MtoKKT: &mut [usize],
+ initrow: usize,
+ initcol: usize,
+ shape: MatrixShape,
+ ) {
+ for i in 0..M.n {
+ let z = zip(&M.rowval, &M.nzval);
+ let start = M.colptr[i];
+ let stop = M.colptr[i + 1];
+
+ for (j, (&Mrow, &Mval)) in z.take(stop).skip(start).enumerate() {
+ let (col, row);
+
+ match shape {
+ MatrixShape::T => {
+ col = Mrow + initcol;
+ row = i + initrow;
+ }
+ MatrixShape::N => {
+ col = i + initcol;
+ row = Mrow + initrow;
+ }
+ };
+
+ let dest = self.colptr[col];
+ self.rowval[dest] = row;
+ self.nzval[dest] = Mval;
+ self.colptr[col] += 1;
+ MtoKKT[j] = dest;
+ }
+ }
+ }
+
+ // Populate the upper or lower triangle with 0s using the self.colptr
+ // as indicator of next fill location in each row
+ pub(crate) fn fill_dense_triangle(
+ &mut self,
+ blocktoKKT: &mut [usize],
+ offset: usize,
+ blockdim: usize,
+ shape: MatrixTriangle,
+ ) {
+ // data will always be supplied as triu, so when filling it into
+ // a tril shape we also need to transpose it. Just write two
+ // separate functions for clarity here
+
+ match shape {
+ MatrixTriangle::Triu => {
+ self._fill_dense_triangle_triu(blocktoKKT, offset, blockdim);
+ }
+
+ MatrixTriangle::Tril => {
+ self._fill_dense_triangle_tril(blocktoKKT, offset, blockdim);
+ }
+ }
+ }
+
+ pub(crate) fn _fill_dense_triangle_triu(
+ &mut self,
+ blocktoKKT: &mut [usize],
+ offset: usize,
+ blockdim: usize,
+ ) {
+ let mut kidx = 0;
+ for col in offset..(offset + blockdim) {
+ for row in offset..=col {
+ let dest = self.colptr[col];
+ self.rowval[dest] = row;
+ self.nzval[dest] = T::zero(); //structural zero
+ self.colptr[col] += 1;
+ blocktoKKT[kidx] = dest;
+ kidx += 1;
+ }
+ }
+ }
+
+ pub(crate) fn _fill_dense_triangle_tril(
+ &mut self,
+ blocktoKKT: &mut [usize],
+ offset: usize,
+ blockdim: usize,
+ ) {
+ let mut kidx = 0;
+ for col in offset..(offset + blockdim) {
+ for row in offset..=col {
+ let dest = self.colptr[col];
+ self.rowval[dest] = row;
+ self.nzval[dest] = T::zero(); //structural zero
+ self.colptr[col] += 1;
+ blocktoKKT[kidx] = dest;
+ kidx += 1;
+ }
+ }
+ }
+
+ // Populate the diagonal with 0s using the K.colptr as indicator of
+ // next fill location in each row
+ pub(crate) fn fill_diag(&mut self, diagtoKKT: &mut [usize], offset: usize, blockdim: usize) {
+ for (i, col) in (offset..(offset + blockdim)).enumerate() {
+ let dest = self.colptr[col];
+ self.rowval[dest] = col;
+ self.nzval[dest] = T::zero(); //structural zero
+ self.colptr[col] += 1;
+ diagtoKKT[i] = dest;
+ }
+ }
+
+ // same as fill_diag, but only places zero
+ // entries where the input matrix M has a missing
+ // diagonal entry. M must be square and TRIU
+ pub(crate) fn fill_missing_diag(&mut self, M: &CscMatrix, initcol: usize) {
+ for i in 0..M.n {
+ // fill out missing diagonal terms only
+ if M.colptr[i] == M.colptr[i+1] || // completely empty column
+ M.rowval[M.colptr[i+1]-1] != i
+ {
+ // last element is not on diagonal
+ let dest = self.colptr[i + initcol];
+ self.rowval[dest] = i + initcol;
+ self.nzval[dest] = T::zero(); //structural zero
+ self.colptr[i] += 1;
+ }
+ }
+ }
+
+ pub(crate) fn colcount_to_colptr(&mut self) {
+ let mut currentptr = 0;
+ for p in &mut self.colptr {
+ let count = *p;
+ *p = currentptr;
+ currentptr += count;
+ }
+ }
+
+ pub(crate) fn backshift_colptrs(&mut self) {
+ self.colptr.rotate_right(1);
+ self.colptr[0] = 0;
+ }
+
+ pub(crate) fn count_diagonal_entries(&self) -> usize {
+ let mut count = 0;
+ for i in 0..self.n {
+ // compare last entry in each column with
+ // its row number to identify diagonal entries
+ if self.colptr[i+1] != self.colptr[i] && // nonempty column
+ self.rowval[self.colptr[i+1]-1] == i
+ {
+ // last element is on diagonal
+ count += 1;
+ }
+ }
+ count
+ }
+}
diff --git a/src/algebra/dense/blas.rs b/src/algebra/dense/blas.rs
new file mode 100644
index 00000000..1fcbbc84
--- /dev/null
+++ b/src/algebra/dense/blas.rs
@@ -0,0 +1,336 @@
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(clippy::too_many_arguments)]
+
+cfg_if::cfg_if! {
+ if #[cfg(feature="python")] {
+ // imports via scipy
+ use crate::python::pyblas::*;
+ }
+ else {
+ // standard imports via blas-lapack-rs crates
+ extern crate blas_src;
+ extern crate lapack_src;
+ use lapack::*;
+ use blas::*;
+ }
+}
+
+
+
+
+pub trait BlasFloatT:
+ private::BlasFloatSealed
+ + XsyevrScalar
+ + XpotrfScalar
+ + XgesddScalar
+ + XgesvdScalar
+ + XgemmScalar
+ + XgemvScalar
+ + XsymvScalar
+ + XsyrkScalar
+ + Xsyr2kScalar
+{}
+impl BlasFloatT for f32 {}
+impl BlasFloatT for f64 {}
+
+mod private {
+ pub trait BlasFloatSealed {}
+ impl BlasFloatSealed for f32 {}
+ impl BlasFloatSealed for f64 {}
+}
+
+
+// --------------------------------------
+// ?syevr : Symmetric eigen decomposition
+// --------------------------------------
+
+pub trait XsyevrScalar: Sized {
+ fn xsyevr(
+ jobz: u8, range: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, vl: Self, vu: Self, il: i32, iu: i32,
+ abstol: Self, m: &mut i32, w: &mut [Self], z: &mut [Self], ldz: i32, isuppz: &mut [i32],
+ work: &mut [Self], lwork: i32, iwork: &mut [i32], liwork: i32, info: &mut i32,
+ );
+}
+
+macro_rules! impl_blas_xsyevr {
+ ($T:ty, $XSYEVR:path) => {
+ impl XsyevrScalar for $T {
+ fn xsyevr(
+ jobz: u8, range: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, vl: Self, vu: Self, il: i32, iu: i32,
+ abstol: Self, m: &mut i32, w: &mut [Self], z: &mut [Self], ldz: i32, isuppz: &mut [i32],
+ work: &mut [$T], lwork: i32, iwork: &mut [i32], liwork: i32, info: &mut i32,
+ ) {
+ unsafe{
+ $XSYEVR(
+ jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m,
+ w, z, ldz, isuppz, work, lwork, iwork, liwork, info,
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_xsyevr!(f32, ssyevr);
+impl_blas_xsyevr!(f64, dsyevr);
+
+// --------------------------------------
+// ?potrf : Cholesky decomposition
+// --------------------------------------
+
+pub trait XpotrfScalar: Sized {
+ fn xpotrf(
+ uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32
+ );
+}
+
+macro_rules! impl_blas_xpotrf{
+ ($T:ty, $XPOTRF:path) => {
+ impl XpotrfScalar for $T {
+ fn xpotrf(
+ uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32
+ ) {
+ unsafe{
+ $XPOTRF(
+ uplo, n, a, lda, info
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_xpotrf!(f32, spotrf);
+impl_blas_xpotrf!(f64, dpotrf);
+
+
+// --------------------------------------
+// ?gesdd : SVD (divide and conquer method)
+// --------------------------------------
+
+pub trait XgesddScalar: Sized {
+ fn xgesdd(
+ jobz: u8, m: i32, n: i32, a: &mut [Self], lda: i32,
+ s: &mut [Self], u: &mut [Self], ldu: i32, vt: &mut [Self], ldvt: i32,
+ work: &mut [Self], lwork: i32, iwork: &mut [i32], info: &mut i32
+ );
+}
+
+macro_rules! impl_blas_xgesdd{
+ ($T:ty, $XGESDD:path) => {
+ impl XgesddScalar for $T {
+ fn xgesdd(
+ jobz: u8, m: i32, n: i32, a: &mut [Self], lda: i32,
+ s: &mut [Self], u: &mut [Self], ldu: i32, vt: &mut [Self], ldvt: i32,
+ work: &mut [Self], lwork: i32, iwork: &mut [i32], info: &mut i32
+ ) {
+ unsafe{
+ $XGESDD(
+ jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_xgesdd!(f32, sgesdd);
+impl_blas_xgesdd!(f64, dgesdd);
+
+
+// --------------------------------------
+// ?gesvd : SVD (QR method)
+// --------------------------------------
+
+pub trait XgesvdScalar: Sized {
+ fn xgesvd(
+ jobu: u8, jobvt: u8,m: i32, n: i32, a: &mut [Self], lda: i32,
+ s: &mut [Self], u: &mut [Self], ldu: i32, vt: &mut [Self], ldvt: i32,
+ work: &mut [Self], lwork: i32, info: &mut i32
+ );
+}
+
+macro_rules! impl_blas_xgesvd{
+ ($T:ty, $XGESVD:path) => {
+ impl XgesvdScalar for $T {
+ fn xgesvd(
+ jobu: u8, jobvt: u8,m: i32, n: i32, a: &mut [Self], lda: i32,
+ s: &mut [Self], u: &mut [Self], ldu: i32, vt: &mut [Self], ldvt: i32,
+ work: &mut [Self], lwork: i32, info: &mut i32
+ ) {
+ unsafe{
+ $XGESVD(
+ jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, info
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_xgesvd!(f32, sgesvd);
+impl_blas_xgesvd!(f64, dgesvd);
+
+
+// --------------------------------------
+// ?gemm : matrix matrix multiply
+// --------------------------------------
+
+pub trait XgemmScalar: Sized {
+ fn xgemm(
+ transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: Self, a: &[Self],
+ lda: i32, b: &[Self], ldb: i32, beta: Self, c: &mut [Self], ldc: i32
+ );
+}
+
+macro_rules! impl_blas_gemm {
+ ($T:ty, $XGEMM:path) => {
+ impl XgemmScalar for $T {
+ fn xgemm(
+ transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: Self, a: &[Self],
+ lda: i32, b: &[Self], ldb: i32, beta: Self, c: &mut [Self], ldc: i32
+ ) {
+ unsafe{
+ $XGEMM(
+ transa, transb, m, n, k, alpha, a,
+ lda, b, ldb, beta, c, ldc
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_gemm!(f32, sgemm);
+impl_blas_gemm!(f64, dgemm);
+
+// --------------------------------------
+// ?gemv : matrix vector multiply (general shape)
+// --------------------------------------
+
+pub trait XgemvScalar: Sized {
+ fn xgemv(
+ trans: u8, m: i32, n: i32, alpha: Self, a: &[Self], lda: i32,
+ x: &[Self], incx: i32, beta: Self, y: &mut [Self], incy: i32
+ );
+}
+
+
+macro_rules! impl_blas_gemv {
+ ($T:ty, $XGEMV:path) => {
+ impl XgemvScalar for $T {
+ fn xgemv(
+ trans: u8, m: i32, n: i32, alpha: Self, a: &[Self], lda: i32,
+ x: &[Self], incx: i32, beta: Self, y: &mut [Self], incy: i32
+ ) {
+ unsafe{
+ $XGEMV(
+ trans, m, n, alpha, a, lda, x, incx, beta, y, incy
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_gemv!(f32, sgemv);
+impl_blas_gemv!(f64, dgemv);
+
+
+// --------------------------------------
+// ?symv : matrix vector multiply (symmetric)
+// --------------------------------------
+
+pub trait XsymvScalar: Sized {
+ fn xsymv(
+ uplo: u8, n: i32, alpha: Self, a: &[Self], lda: i32,
+ x: &[Self], incx: i32, beta: Self, y: &mut [Self], incy: i32
+ );
+}
+
+
+macro_rules! impl_blas_gsymv {
+ ($T:ty, $XSYMV:path) => {
+ impl XsymvScalar for $T {
+ fn xsymv(
+ uplo: u8, n: i32, alpha: Self, a: &[Self], lda: i32,
+ x: &[Self], incx: i32, beta: Self, y: &mut [Self], incy: i32
+ ) {
+ unsafe{
+ $XSYMV(
+ uplo, n, alpha, a, lda, x, incx, beta, y, incy
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_gsymv!(f32, ssymv);
+impl_blas_gsymv!(f64, dsymv);
+
+
+// --------------------------------------
+// ?syrk : symmetric rank k update
+// --------------------------------------
+
+pub trait XsyrkScalar: Sized {
+ fn xsyrk(
+ uplo: u8, trans: u8, n: i32, k: i32, alpha: Self,
+ a: &[Self], lda: i32, beta: Self, c: &mut [Self], ldc: i32
+ );
+}
+
+
+macro_rules! impl_blas_gsyrk {
+ ($T:ty, $XSYRK:path) => {
+ impl XsyrkScalar for $T {
+ fn xsyrk(
+ uplo: u8, trans: u8, n: i32, k: i32, alpha: Self,
+ a: &[Self], lda: i32, beta: Self, c: &mut [Self], ldc: i32
+ ) {
+ unsafe{
+ $XSYRK(
+ uplo, trans, n, k, alpha, a, lda, beta, c, ldc
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_gsyrk!(f32, ssyrk);
+impl_blas_gsyrk!(f64, dsyrk);
+
+// --------------------------------------
+// ?syrk : symmetric rank 2k update
+// --------------------------------------
+
+pub trait Xsyr2kScalar: Sized {
+ fn xsyr2k(
+ uplo: u8, trans: u8, n: i32, k: i32, alpha: Self, a: &[Self], lda: i32,
+ b: &[Self], ldb: i32, beta: Self, c: &mut [Self], ldc: i32
+ );
+}
+
+
+macro_rules! impl_blas_gsyr2k {
+ ($T:ty, $XSYR2K:path) => {
+ impl Xsyr2kScalar for $T {
+ fn xsyr2k(
+ uplo: u8, trans: u8, n: i32, k: i32, alpha: Self, a: &[Self], lda: i32,
+ b: &[Self], ldb: i32, beta: Self, c: &mut [Self], ldc: i32
+ ) {
+ unsafe{
+ $XSYR2K(
+ uplo, trans, n, k, alpha, a, lda,
+ b, ldb, beta, c, ldc
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_blas_gsyr2k!(f32, ssyr2k);
+impl_blas_gsyr2k!(f64, dsyr2k);
\ No newline at end of file
diff --git a/src/algebra/dense/blaslike_traits.rs b/src/algebra/dense/blaslike_traits.rs
new file mode 100644
index 00000000..e120bbc6
--- /dev/null
+++ b/src/algebra/dense/blaslike_traits.rs
@@ -0,0 +1,58 @@
+#![allow(non_snake_case)]
+use crate::algebra::{DenseFactorizationError, DenseMatrix, Matrix};
+
+pub(crate) trait FactorEigen {
+ type T;
+ // computes eigenvalues only (full set)
+ fn eigvals(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError>;
+ // computes eigenvalues and vectors (full set)
+ fn eigen(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError>;
+}
+
+pub(crate) trait FactorCholesky {
+ type T;
+ // computes the Cholesky decomposition. Only the upper
+ // part of the input A will be referenced, and A will
+ // by modified in place. The Cholesky factor is stored
+ // in self.L
+ fn cholesky(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError>;
+}
+
+pub(crate) trait FactorSVD {
+ type T;
+ // compute "economy size" SVD. Values in A are overwritten
+ // as internal working space.
+ fn svd(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError>;
+}
+
+pub(crate) trait MultiplySYRK {
+ type T;
+ fn syrk(&mut self, A: &MATA, α: Self::T, β: Self::T) -> &Self
+ where
+ MATA: DenseMatrix;
+}
+
+pub(crate) trait MultiplySYR2K {
+ type T;
+ fn syr2k(
+ &mut self, A: &Matrix, B: &Matrix, α: Self::T, β: Self::T
+ ) -> &Self;
+}
+
+pub(crate) trait MultiplyGEMM {
+ type T;
+ fn mul(&mut self, A: &MATA, B: &MATB, α: Self::T, β: Self::T) -> &Self
+ where
+ MATB: DenseMatrix,
+ MATA: DenseMatrix;
+}
+
+pub(crate) trait MultiplyGEMV {
+ type T;
+ fn gemv(&self, x: &[Self::T], y: &mut [Self::T], α: Self::T, β: Self::T);
+}
+
+pub(crate) trait MultiplySYMV {
+ type T;
+ fn symv(&self, x: &[Self::T], y: &mut [Self::T], α: Self::T, β: Self::T);
+}
diff --git a/src/algebra/dense/block_concatenate.rs b/src/algebra/dense/block_concatenate.rs
new file mode 100644
index 00000000..2986aa99
--- /dev/null
+++ b/src/algebra/dense/block_concatenate.rs
@@ -0,0 +1,51 @@
+#![allow(non_snake_case)]
+use crate::algebra::{BlockConcatenate, FloatT, Matrix, ShapedMatrix};
+use std::iter::Extend;
+
+impl BlockConcatenate for Matrix
+where
+ T: FloatT,
+{
+ fn hcat(A: &Self, B: &Self) -> Self {
+ //first check for compatible row dimensions
+ assert_eq!(A.m, B.m);
+
+ //dimensions for C = [A B];
+ let m = A.m; //rows
+ let n = A.n + B.n; //cols
+ let mut data = A.data.clone();
+ data.extend(&B.data);
+ Self { m, n, data }
+ }
+
+ fn vcat(A: &Self, B: &Self) -> Self {
+ //first check for compatible column dimensions
+ assert_eq!(A.n, B.n);
+
+ //dimensions for C = [A; B];
+ let m = A.m + B.m; //rows C
+ let n = A.n; //cols C
+ let mut data = vec![];
+ data.reserve_exact(m * n);
+
+ for col in 0..A.ncols() {
+ data.extend(A.col_slice(col));
+ data.extend(B.col_slice(col));
+ }
+ Self { m, n, data }
+ }
+}
+
+#[test]
+fn test_dense_concatenate() {
+ let A = Matrix::new((2, 2), vec![1., 2., 3., 4.]);
+ let B = Matrix::new((2, 2), vec![5., 6., 7., 8.]);
+
+ let C = Matrix::hcat(&A, &B);
+ assert!(C.data == [1., 2., 3., 4., 5., 6., 7., 8.]);
+ assert!(C.size() == (2, 4));
+
+ let C = Matrix::vcat(&A, &B);
+ assert!(C.data == [1., 2., 5., 6., 3., 4., 7., 8.]);
+ assert!(C.size() == (4, 2));
+}
diff --git a/src/algebra/dense/cholesky.rs b/src/algebra/dense/cholesky.rs
new file mode 100644
index 00000000..85a2e2d8
--- /dev/null
+++ b/src/algebra/dense/cholesky.rs
@@ -0,0 +1,76 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ DenseFactorizationError, FactorCholesky, FloatT, Matrix, MatrixTriangle, ShapedMatrix,
+ VectorMath,
+};
+
+pub(crate) struct CholeskyEngine {
+ /// lower triangular factor (stored as square dense)
+ pub L: Matrix,
+}
+
+impl CholeskyEngine
+where
+ T: FloatT,
+{
+ pub fn new(n: usize) -> Self {
+ let L = Matrix::::zeros((n, n));
+ Self { L }
+ }
+}
+
+impl FactorCholesky for CholeskyEngine
+where
+ T: FloatT,
+{
+ type T = T;
+ fn cholesky(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError> {
+ if A.size() != self.L.size() {
+ return Err(DenseFactorizationError::IncompatibleDimension);
+ }
+
+ // standard BLAS ?potrf arguments for computing
+ // cholesky decomposition
+ let uplo = MatrixTriangle::Triu.as_blas_char(); // only look at triu of A
+ let An = A.nrows().try_into().unwrap();
+ let a = A.data_mut();
+ let lda = An;
+ let info = &mut 0_i32; // output info
+
+ T::xpotrf(uplo, An, a, lda, info);
+
+ if *info != 0 {
+ return Err(DenseFactorizationError::Cholesky(*info));
+ }
+
+ // A will now have L^T in its upper triangle.
+ let At = A.t();
+ self.L.data_mut().set(T::zero());
+
+ let n = self.L.nrows();
+ for j in 0..n {
+ for i in j..n {
+ self.L[(i, j)] = At[(i, j)];
+ }
+ }
+
+ Ok(())
+ }
+}
+
+#[test]
+fn test_cholesky() {
+ use crate::algebra::{DenseMatrix, MultiplyGEMM, VectorMath};
+
+ let mut S = Matrix::new((3, 3), vec![8., -2., 4., -2., 12., 2., 4., 2., 6.]);
+ let Scopy = S.clone(); //S is corrupted after factorization
+
+ let mut eng = CholeskyEngine::::new(3);
+ assert!(eng.cholesky(&mut S).is_ok());
+
+ let mut M = Matrix::::zeros((3, 3));
+ M.mul(&eng.L, &eng.L.t(), 1.0, 0.0);
+
+ assert!(M.data().norm_inf_diff(Scopy.data()) < 1e-8);
+}
diff --git a/src/algebra/dense/core.rs b/src/algebra/dense/core.rs
new file mode 100644
index 00000000..89196318
--- /dev/null
+++ b/src/algebra/dense/core.rs
@@ -0,0 +1,292 @@
+use crate::algebra::*;
+use std::ops::{Index, IndexMut};
+
+/// Dense matrix in column major format
+///
+/// __Example usage__ : To construct the 3 x 3 matrix
+/// ```text
+/// A = [1. 3. 5.]
+/// [2. 0. 6.]
+/// [0. 4. 7.]
+/// ```
+///
+/// ```no_run
+/// use clarabel::algebra::Matrix;
+///
+/// let A : Matrix = Matrix::new(
+/// (3, 3), //size as tuple
+/// vec![1., 2., 0., 3., 0., 4., 5., 6., 7.]
+/// );
+///
+/// ```
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct Matrix {
+ /// number of rows
+ pub m: usize,
+ ///number of columns
+ pub n: usize,
+ /// vector of data in column major format
+ pub data: Vec,
+}
+
+impl Matrix
+where
+ T: FloatT,
+{
+ pub fn zeros(size: (usize, usize)) -> Self {
+ let (m, n) = size;
+ let data = vec![T::zero(); m * n];
+ Self { m, n, data }
+ }
+
+ pub fn identity(n: usize) -> Self {
+ let mut mat = Matrix::zeros((n, n));
+ mat.set_identity();
+ mat
+ }
+
+ pub fn set_identity(&mut self) {
+ assert!(self.m == self.n);
+ self.data_mut().set(T::zero());
+ for i in 0..self.n {
+ self[(i, i)] = T::one();
+ }
+ }
+
+ pub fn new(size: (usize, usize), data: Vec) -> Self {
+ let (m, n) = size;
+ assert!(m * n == data.len());
+ Self { m, n, data }
+ }
+
+ pub fn new_from_slice(size: (usize, usize), src: &[T]) -> Self {
+ Self::new(size, src.to_vec())
+ }
+
+ pub fn copy_from_slice(&mut self, src: &[T]) -> &mut Self {
+ self.data.copy_from_slice(src);
+ self
+ }
+
+ pub fn data_mut(&mut self) -> &mut [T] {
+ &mut self.data
+ }
+
+ pub fn t(&self) -> Adjoint<'_, Self> {
+ Adjoint { src: self }
+ }
+
+ /// Returns a Symmetric view of a triu matrix
+ pub fn sym(&self) -> Symmetric<'_, Self> {
+ debug_assert!(self.is_triu());
+ Symmetric { src: self }
+ }
+
+ /// Set A = (A + A') / 2. Assumes A is real
+ pub fn symmetric_part(&mut self) -> &mut Self {
+ assert!(self.is_square());
+ let half: T = (0.5_f64).as_T();
+
+ for r in 0..self.m {
+ for c in 0..r {
+ let val = half * (self[(r, c)] + self[(c, r)]);
+ self[(c, r)] = val;
+ self[(r, c)] = val;
+ }
+ }
+ self
+ }
+
+ pub fn col_slice(&self, col: usize) -> &[T] {
+ assert!(col < self.n);
+ &self.data[(col * self.m)..(col + 1) * self.m]
+ }
+
+ pub fn col_slice_mut(&mut self, col: usize) -> &mut [T] {
+ assert!(col < self.n);
+ &mut self.data[(col * self.m)..(col + 1) * self.m]
+ }
+
+ pub fn is_triu(&self) -> bool {
+ for c in 0..self.ncols() {
+ for r in (c + 1)..self.nrows() {
+ if self[(r, c)] != T::zero() {
+ return false;
+ }
+ }
+ }
+ true
+ }
+}
+
+impl DenseMatrix for Matrix
+where
+ T: FloatT,
+{
+ type T = T;
+ fn index_linear(&self, idx: (usize, usize)) -> usize {
+ idx.0 + self.m * idx.1
+ }
+ fn data(&self) -> &[T] {
+ &self.data
+ }
+}
+
+impl<'a, T> DenseMatrix for ReshapedMatrix<'a, T>
+where
+ T: FloatT,
+{
+ type T = T;
+ fn index_linear(&self, idx: (usize, usize)) -> usize {
+ idx.0 + self.m * idx.1
+ }
+ fn data(&self) -> &[T] {
+ self.data
+ }
+}
+
+impl<'a, T> DenseMatrix for Adjoint<'a, Matrix>
+where
+ T: FloatT,
+{
+ type T = T;
+ #[inline]
+ fn index_linear(&self, idx: (usize, usize)) -> usize {
+ self.src.index_linear((idx.1, idx.0))
+ }
+ fn data(&self) -> &[T] {
+ &self.src.data
+ }
+}
+
+impl<'a, T> DenseMatrix for Symmetric<'a, Matrix>
+where
+ T: FloatT,
+{
+ type T = T;
+ #[inline]
+ fn index_linear(&self, idx: (usize, usize)) -> usize {
+ if idx.0 <= idx.1 {
+ //triu part
+ self.src.index_linear((idx.0, idx.1))
+ } else {
+ //tril part uses triu entry
+ self.src.index_linear((idx.1, idx.0))
+ }
+ }
+ fn data(&self) -> &[T] {
+ &self.src.data
+ }
+}
+
+impl<'a, T> ReshapedMatrix<'a, T>
+where
+ T: FloatT,
+{
+ pub fn from_slice(data: &'a [T], m: usize, n: usize) -> Self {
+ Self { data, m, n }
+ }
+
+ #[allow(dead_code)]
+ pub fn reshape(&mut self, size: (usize, usize)) -> &Self {
+ assert!(size.0 * size.1 == self.m * self.n);
+ self.m = size.0;
+ self.n = size.1;
+ self
+ }
+}
+
+impl IndexMut<(usize, usize)> for Matrix
+where
+ T: FloatT,
+{
+ fn index_mut(&mut self, idx: (usize, usize)) -> &mut Self::Output {
+ let lidx = self.index_linear(idx);
+ &mut self.data[lidx]
+ }
+}
+
+macro_rules! impl_mat_index {
+ ($mat:ty) => {
+ impl Index<(usize, usize)> for $mat {
+ type Output = T;
+ fn index(&self, idx: (usize, usize)) -> &Self::Output {
+ &self.data()[self.index_linear(idx)]
+ }
+ }
+ };
+}
+impl_mat_index!(Matrix);
+impl_mat_index!(ReshapedMatrix<'_, T>);
+impl_mat_index!(Adjoint<'_, Matrix>);
+impl_mat_index!(Symmetric<'_, Matrix>);
+
+impl ShapedMatrix for Matrix
+where
+ T: FloatT,
+{
+ fn nrows(&self) -> usize {
+ self.m
+ }
+ fn ncols(&self) -> usize {
+ self.n
+ }
+ fn shape(&self) -> MatrixShape {
+ MatrixShape::N
+ }
+}
+
+impl Symmetric<'_, Matrix>
+where
+ T: FloatT,
+{
+ pub(crate) fn pack_triu(&self, v: &mut [T]) {
+ let n = self.ncols();
+ let numel = triangular_number(n);
+ assert!(v.len() == numel);
+
+ let mut k = 0;
+ for col in 0..self.ncols() {
+ for row in 0..=col {
+ v[k] = self.src[(row, col)];
+ k += 1;
+ }
+ }
+ }
+}
+
+impl std::fmt::Display for Matrix
+where
+ T: FloatT,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ _display_matrix(self, f)
+ }
+}
+
+fn _display_matrix(m: &M, f: &mut std::fmt::Formatter) -> std::fmt::Result
+where
+ M: DenseMatrix,
+ M::Output: FloatT,
+{
+ writeln!(f)?;
+ for i in 0..m.nrows() {
+ write!(f, "[ ")?;
+ for j in 0..m.ncols() {
+ write!(f, " {:?}", m[(i, j)])?;
+ }
+ writeln!(f, "]")?;
+ }
+ writeln!(f)?;
+ Ok(())
+}
+
+#[test]
+fn test_matrix_istriu() {
+ use crate::algebra::Matrix;
+ let (m, n) = (3, 3);
+ let a = vec![1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0];
+ let mat = Matrix::new((m, n), a);
+ assert!(mat.is_triu());
+}
diff --git a/src/algebra/dense/gemm.rs b/src/algebra/dense/gemm.rs
new file mode 100644
index 00000000..93b59ade
--- /dev/null
+++ b/src/algebra/dense/gemm.rs
@@ -0,0 +1,78 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{DenseMatrix, FloatT, Matrix, MatrixShape, MultiplyGEMM, ShapedMatrix};
+
+impl MultiplyGEMM for Matrix
+where
+ T: FloatT,
+{
+ type T = T;
+ // implements self = C = αA*B + βC
+ fn mul(&mut self, A: &MATA, B: &MATB, α: T, β: T) -> &Self
+ where
+ MATA: DenseMatrix,
+ MATB: DenseMatrix,
+ {
+ assert!(A.ncols() == B.nrows() && self.nrows() == A.nrows() && self.ncols() == B.ncols());
+
+ if self.nrows() == 0 || self.ncols() == 0 {
+ return self;
+ }
+
+ // standard BLAS ?gemm arguments for computing
+ // general matrix-matrix multiply
+ let transA = A.shape().as_blas_char();
+ let transB = B.shape().as_blas_char();
+ let m = A.nrows().try_into().unwrap();
+ let n = B.ncols().try_into().unwrap();
+ let k = A.ncols().try_into().unwrap();
+ let lda = if A.shape() == MatrixShape::N { m } else { k };
+ let ldb = if B.shape() == MatrixShape::N { k } else { n };
+ let ldc = m;
+
+ T::xgemm(
+ transA,
+ transB,
+ m,
+ n,
+ k,
+ α,
+ A.data(),
+ lda,
+ B.data(),
+ ldb,
+ β,
+ self.data_mut(),
+ ldc,
+ );
+ self
+ }
+}
+
+#[test]
+fn test_gemm() {
+ let (m, n, k) = (2, 4, 3);
+ let a = vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0];
+ let b = vec![
+ 1.0, 5.0, 9.0, 2.0, 6.0, 10.0, 3.0, 7.0, 11.0, 4.0, 8.0, 12.0,
+ ];
+ let c = vec![2.0, 7.0, 6.0, 2.0, 0.0, 7.0, 4.0, 2.0];
+
+ let mut A = Matrix::zeros((m, k));
+ let mut B = Matrix::zeros((k, n));
+ let mut C = Matrix::::zeros((m, n));
+ A.copy_from_slice(&a);
+ B.copy_from_slice(&b);
+ C.copy_from_slice(&c);
+ C.mul(&A, &B, 1.0, 1.0);
+
+ assert!(C.data() == vec![40.0, 90.0, 50.0, 100.0, 50.0, 120.0, 60.0, 130.0]);
+
+ // new from slice and transposed multiply
+ let A = Matrix::new_from_slice((m, k), &a);
+ let B = Matrix::new_from_slice((k, n), &b);
+ let mut C = Matrix::::zeros((n, m));
+ C.mul(&B.t(), &A.t(), 1.0, 0.0);
+
+ assert!(C.data() == vec![38.0, 44.0, 50.0, 56.0, 83.0, 98.0, 113.0, 128.0]);
+}
diff --git a/src/algebra/dense/gemv.rs b/src/algebra/dense/gemv.rs
new file mode 100644
index 00000000..dd820ab3
--- /dev/null
+++ b/src/algebra/dense/gemv.rs
@@ -0,0 +1,74 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ Adjoint, DenseMatrix, FloatT, Matrix, MatrixShape, MultiplyGEMV, ShapedMatrix,
+};
+
+impl MultiplyGEMV for Matrix
+where
+ T: FloatT,
+{
+ type T = T;
+ // implements y = αA*x + βy
+ fn gemv(&self, x: &[Self::T], y: &mut [Self::T], α: Self::T, β: Self::T) {
+ let (m, n) = self.size();
+ Matrix::<T>::_gemv(self.shape(), m, n, α, self.data(), x, β, y);
+ }
+}
+
+impl<'a, T> MultiplyGEMV for Adjoint<'a, Matrix>
+where
+ T: FloatT,
+{
+ type T = T;
+ // implements y = αA'*x + βy
+ fn gemv(&self, x: &[Self::T], y: &mut [Self::T], α: Self::T, β: Self::T) {
+ let (m, n) = self.src.size(); //NB: size of A, not A'
+ Matrix::<T>::_gemv(self.shape(), m, n, α, self.data(), x, β, y);
+ }
+}
+
+impl Matrix
+where
+ T: FloatT,
+{
+ #[allow(clippy::too_many_arguments)]
+ fn _gemv(trans: MatrixShape, m: usize, n: usize, α: T, a: &[T], x: &[T], β: T, y: &mut [T]) {
+ match trans {
+ MatrixShape::N => {
+ assert!(n == x.len() && m == y.len());
+ }
+ MatrixShape::T => {
+ assert!(m == x.len() && n == y.len());
+ }
+ }
+
+ // standard BLAS ?gemv arguments for computing matrix-vector product
+ let trans = trans.as_blas_char();
+ let m = m.try_into().unwrap();
+ let n = n.try_into().unwrap();
+ let lda = m;
+ let incx = 1;
+ let incy = 1;
+
+ T::xgemv(trans, m, n, α, a, lda, x, incx, β, y, incy);
+ }
+}
+
+#[test]
+fn test_gemv() {
+ use crate::algebra::Matrix;
+ let (m, n) = (2, 3);
+ let a = vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0];
+ let A = Matrix::new((m, n), a);
+
+ let x = vec![1., 2., 3.];
+ let mut y = vec![-1., -2.];
+ A.gemv(&x, &mut y, 2.0, 3.0);
+ assert!(y == [25.0, 58.0]);
+
+ let x = vec![1., 2.];
+ let mut y = vec![-1., -2., -3.];
+ A.t().gemv(&x, &mut y, 2.0, 3.0);
+ assert!(y == [15.0, 18.0, 21.0]);
+}
diff --git a/src/algebra/dense/kron.rs b/src/algebra/dense/kron.rs
new file mode 100644
index 00000000..a63e65d3
--- /dev/null
+++ b/src/algebra/dense/kron.rs
@@ -0,0 +1,54 @@
+#![allow(non_snake_case)]
+use crate::algebra::{DenseMatrix, FloatT, Matrix, ShapedMatrix};
+
+impl Matrix
+where
+ T: FloatT,
+{
+ pub(crate) fn kron(&mut self, A: &MATA, B: &MATB) -> &Self
+ where
+ MATB: DenseMatrix,
+ MATA: DenseMatrix,
+ {
+ let (pp, qq) = A.size();
+ let (rr, ss) = B.size();
+ assert!(self.nrows() == pp * rr);
+ assert!(self.ncols() == qq * ss);
+
+ let mut i = 0;
+ for q in 0..qq {
+ for s in 0..ss {
+ for p in 0..pp {
+ let Apq = A[(p, q)];
+ for r in 0..rr {
+ self.data_mut()[i] = Apq * B[(r, s)];
+ i += 1;
+ }
+ }
+ }
+ }
+ self
+ }
+}
+
+#[test]
+fn test_kron() {
+ let A = Matrix::new((2, 2), vec![1.0, 4.0, 2.0, 5.0]);
+ let B = Matrix::new((1, 2), vec![1.0, 2.0]);
+
+ let (k1, m1) = A.size();
+ let (k2, m2) = B.size();
+
+ let mut K = Matrix::<f64>::zeros((k1 * k2, m1 * m2));
+ K.kron(&A, &B);
+ assert!(K.data() == vec![1., 4., 2., 8., 2., 5., 4., 10.]);
+ K.kron(&A.t(), &B);
+ assert!(K.data() == vec![1., 2., 2., 4., 4., 5., 8., 10.]);
+
+ let (k2, m2) = B.t().size();
+ let mut K = Matrix::<f64>::zeros((k1 * k2, m1 * m2));
+ K.kron(&A, &B.t());
+ assert!(K.data() == vec![1., 2., 4., 8., 2., 4., 5., 10.]);
+ K.kron(&A.t(), &B.t());
+ assert!(K.data() == vec![1., 2., 2., 4., 4., 8., 5., 10.]);
+}
diff --git a/src/algebra/dense/matrix_math.rs b/src/algebra/dense/matrix_math.rs
new file mode 100644
index 00000000..1745e4af
--- /dev/null
+++ b/src/algebra/dense/matrix_math.rs
@@ -0,0 +1,170 @@
+#![allow(non_snake_case)]
+use crate::algebra::{FloatT, Matrix, MatrixMath, VectorMath};
+
+impl MatrixMath for Matrix {
+ type T = T;
+
+ //scalar mut operations
+ fn scale(&mut self, c: T) {
+ self.data.scale(c);
+ }
+
+ fn negate(&mut self) {
+ self.data.negate();
+ }
+
+ fn col_norms(&self, norms: &mut [T]) {
+ norms.fill(T::zero());
+ self.col_norms_no_reset(norms);
+ }
+
+ fn col_norms_no_reset(&self, norms: &mut [T]) {
+ for (i, norm) in norms.iter_mut().enumerate() {
+ let colnorm = self.col_slice(i).norm_inf();
+ *norm = Self::T::max(*norm, colnorm);
+ }
+ }
+
+ fn col_norms_sym(&self, norms: &mut [T]) {
+ norms.fill(T::zero());
+ self.col_norms_sym_no_reset(norms);
+ }
+
+ fn col_norms_sym_no_reset(&self, norms: &mut [T]) {
+ for c in 0..self.n {
+ for r in 0..=c {
+ let tmp = self[(r, c)];
+ norms[r] = T::max(norms[r], tmp);
+ norms[c] = T::max(norms[c], tmp);
+ }
+ }
+ }
+
+ fn row_norms(&self, norms: &mut [T]) {
+ norms.fill(T::zero());
+ self.row_norms_no_reset(norms);
+ }
+
+ fn row_norms_no_reset(&self, norms: &mut [T]) {
+ for r in 0..self.m {
+ for c in 0..self.n {
+ norms[r] = T::max(norms[r], T::abs(self[(r, c)]))
+ }
+ }
+ }
+
+ fn lscale(&mut self, l: &[T]) {
+ for col in 0..self.n {
+ self.col_slice_mut(col).hadamard(l);
+ }
+ }
+
+ fn rscale(&mut self, r: &[T]) {
+ for (col, val) in r.iter().enumerate() {
+ self.col_slice_mut(col).scale(*val);
+ }
+ }
+
+ fn lrscale(&mut self, l: &[T], r: &[T]) {
+ for i in 0..self.m {
+ for j in 0..self.n {
+ self[(i, j)] *= l[i] * r[j];
+ }
+ }
+ }
+
+ // PJG: this should probably only be implemented as part of
+ // some SymmetricMatrixMath trait. Uses upper triangle only
+ // and assumes that the rest is symmetric.
+ fn quad_form(&self, y: &[T], x: &[T]) -> T {
+ assert_eq!(self.m, self.n);
+ let mut out = T::zero();
+ for col in 0..self.n {
+ let mut tmp1 = T::zero();
+ let mut tmp2 = T::zero();
+ for row in 0..=col {
+ let Mv = self[(row, col)];
+ if row < col {
+ tmp1 += Mv * x[row];
+ tmp2 += Mv * y[row];
+ } else {
+ //diagonal term
+ out += Mv * x[col] * y[col];
+ }
+ }
+ out += tmp1 * y[col] + tmp2 * x[col];
+ }
+ out
+ }
+}
+
+#[test]
+fn test_quad_form() {
+ let mut A = Matrix::new((2, 2), vec![1.0, 4.0, 4.0, 5.0]);
+ let x = vec![1.0, 2.0];
+ let y = vec![3.0, 4.0];
+ println!("Quad form was {:?}", A.quad_form(&x, &y));
+ assert!(A.quad_form(&x, &y) == 83.0);
+
+ //remove lower triangle part and check again.
+ //should not change the result.
+ A[(1, 0)] = 0.0;
+ assert!(A.quad_form(&x, &y) == 83.0);
+}
+
+#[test]
+fn test_row_col_norms() {
+ //A =
+ //[-1 4 6]
+ //[ 3 -8 7]
+ //[ 0 4 9]
+
+ let A = Matrix::new((3, 3), vec![-1., 3., 0., 4., -8., 4., 6., 7., 9.]);
+
+ let mut rnorms = vec![0.0; 3];
+ let mut cnorms = vec![0.0; 3];
+
+ A.row_norms(&mut rnorms);
+ assert!(rnorms == [6.0, 8.0, 9.0]);
+ A.col_norms(&mut cnorms);
+ assert!(cnorms == [3.0, 8.0, 9.0]);
+
+ //no reset versions
+ let mut rnorms = vec![0.0; 3];
+ let mut cnorms = vec![0.0; 3];
+ rnorms[2] = 100.;
+ cnorms[2] = 100.;
+
+ A.row_norms_no_reset(&mut rnorms);
+ assert!(rnorms == [6.0, 8.0, 100.0]);
+ A.col_norms_no_reset(&mut cnorms);
+ assert!(cnorms == [3.0, 8.0, 100.0]);
+}
+
+#[test]
+fn test_l_r_scalings() {
+ //A =
+ //[-1 4 6]
+ //[ 3 -8 7]
+ //[ 0 4 9]
+
+ let A = Matrix::new((3, 3), vec![-1., 3., 0., 4., -8., 4., 6., 7., 9.]);
+
+ let lscale = vec![1., -2., 3.];
+ let rscale = vec![-2., 1., -3.];
+
+ //right scale
+ let mut B = A.clone();
+ B.rscale(&rscale);
+ assert!(B.data == [2., -6., 0., 4., -8., 4., -18., -21., -27.]);
+
+ //left scale
+ let mut B = A.clone();
+ B.lscale(&lscale);
+ assert!(B.data == [-1., -6., 0., 4., 16., 12., 6., -14., 27.]);
+
+ //left-right scale
+ let mut B = A.clone();
+ B.lrscale(&lscale, &rscale);
+ assert!(B.data == [2., 12., 0., 4., 16., 12., -18., 42., -81.]);
+}
diff --git a/src/algebra/dense/mod.rs b/src/algebra/dense/mod.rs
new file mode 100644
index 00000000..49744efe
--- /dev/null
+++ b/src/algebra/dense/mod.rs
@@ -0,0 +1,24 @@
+mod core;
+pub use self::core::*;
+mod block_concatenate;
+pub use block_concatenate::*;
+mod matrix_math;
+pub use matrix_math::*;
+
+mod blaslike_traits;
+pub(crate) use blaslike_traits::*;
+mod blas;
+pub(crate) use self::blas::*;
+mod cholesky;
+pub(crate) use cholesky::*;
+mod syevr;
+pub(crate) use syevr::*;
+mod svd;
+pub(crate) use svd::*;
+
+mod gemm;
+mod gemv;
+mod kron;
+mod symv;
+mod syr2k;
+mod syrk;
diff --git a/src/algebra/dense/svd.rs b/src/algebra/dense/svd.rs
new file mode 100644
index 00000000..8adcbe52
--- /dev/null
+++ b/src/algebra/dense/svd.rs
@@ -0,0 +1,145 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{DenseFactorizationError, FactorSVD, FloatT, Matrix, ShapedMatrix};
+use core::cmp::min;
+
+#[derive(PartialEq, Eq)]
+#[allow(dead_code)] //QRDecomposition is not used yet
+pub(crate) enum SVDEngineAlgorithm {
+ DivideAndConquer,
+ QRDecomposition,
+}
+
+const DEFAULT_SVD_ALGORITHM: SVDEngineAlgorithm = SVDEngineAlgorithm::DivideAndConquer;
+
+pub(crate) struct SVDEngine {
+ /// Computed singular values
+ pub s: Vec,
+
+ /// Left and right SVD matrices, each containing
+ /// min(m,n) vectors. Note right singular vectors
+ /// are stored in transposed form.
+ pub U: Matrix,
+ pub Vt: Matrix,
+
+ // BLAS factorization method
+ pub algorithm: SVDEngineAlgorithm,
+
+ // BLAS workspace (allocated vecs only)
+ work: Vec,
+ iwork: Vec,
+}
+
+impl SVDEngine
+where
+ T: FloatT,
+{
+ pub fn new(size: (usize, usize)) -> Self {
+ let (m, n) = size;
+ let s = vec![T::zero(); min(m, n)];
+ let U = Matrix::<T>::zeros((m, min(m, n)));
+ let Vt = Matrix::<T>::zeros((min(m, n), n));
+ let work = vec![T::one()];
+ let iwork = vec![1];
+ let algorithm = DEFAULT_SVD_ALGORITHM;
+ Self {
+ s,
+ U,
+ Vt,
+ work,
+ iwork,
+ algorithm,
+ }
+ }
+}
+
+impl FactorSVD for SVDEngine
+where
+ T: FloatT,
+{
+ type T = T;
+ fn svd(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError> {
+ let (m, n) = A.size();
+
+ if self.U.nrows() != m || self.Vt.ncols() != n {
+ return Err(DenseFactorizationError::IncompatibleDimension);
+ }
+
+ // standard BLAS ?gesdd and/or ?gesvd arguments for economy size SVD.
+
+ let job = b'S'; // compact.
+ let m = m.try_into().unwrap();
+ let n = n.try_into().unwrap();
+ let a = A.data_mut();
+ let lda = m;
+ let s = &mut self.s; // singular values go here
+ let u = self.U.data_mut(); // U data goes here
+ let ldu = m; // leading dim of U
+ let vt = self.Vt.data_mut(); // Vt data goes here
+ let ldvt = min(m, n); // leading dim of Vt
+ let work = &mut self.work;
+ let mut lwork = -1_i32; // -1 => config to request required work size
+ let iwork = &mut self.iwork;
+ let info = &mut 0_i32; // output info
+
+ for i in 0..2 {
+ // iwork is only used for the DivideAndConquer BLAS call
+ // and should always be 8*min(m,n) elements in that case.
+ // This will *not* shrink iwork in the case that the engines
+ // algorithm is switched back and forth
+ if self.algorithm == SVDEngineAlgorithm::DivideAndConquer {
+ iwork.resize(8 * min(m, n) as usize, 0);
+ }
+
+ // Two calls to BLAS. First one gets size for work.
+ match self.algorithm {
+ SVDEngineAlgorithm::DivideAndConquer => T::xgesdd(
+ job, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info,
+ ),
+ SVDEngineAlgorithm::QRDecomposition => T::xgesvd(
+ job, job, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, info,
+ ),
+ }
+ if *info != 0 {
+ return Err(DenseFactorizationError::SVD(*info));
+ }
+
+ // resize work vector and reset length
+ if i == 0 {
+ lwork = work[0].to_i32().unwrap();
+ work.resize(lwork as usize, T::zero());
+ }
+ }
+ Ok(())
+ }
+}
+
+#[test]
+fn test_svd() {
+ use crate::algebra::{DenseMatrix, MultiplyGEMM, VectorMath};
+
+ let mut A = Matrix::new((2, 3), vec![3., 2., 2., 3., 2., -2.]);
+ let Acopy = A.clone(); //A is corrupted after factorization
+
+ let mut eng = SVDEngine::<f64>::new((2, 3));
+ assert!(eng.svd(&mut A).is_ok());
+ let sol = [5., 3.];
+ assert!(eng.s.norm_inf_diff(&sol) < 1e-8);
+
+ let mut M = Matrix::<f64>::zeros((2, 3));
+
+ let U = &eng.U;
+ let s = &eng.s;
+ let Vt = &eng.Vt;
+
+ //reconstruct matrix from SVD
+ let mut Us = U.clone();
+ for c in 0..Us.ncols() {
+ for r in 0..Us.nrows() {
+ Us[(r, c)] *= s[c];
+ }
+ }
+ M.mul(&Us, Vt, 1.0, 0.0);
+
+ assert!(M.data().norm_inf_diff(Acopy.data()) < 1e-8);
+}
diff --git a/src/algebra/dense/syevr.rs b/src/algebra/dense/syevr.rs
new file mode 100644
index 00000000..890b8349
--- /dev/null
+++ b/src/algebra/dense/syevr.rs
@@ -0,0 +1,145 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ DenseFactorizationError, FactorEigen, FloatT, Matrix, MatrixTriangle, ShapedMatrix,
+};
+
+pub(crate) struct EigEngine {
+ /// Computed eigenvalues in ascending order
+ pub λ: Vec,
+
+ /// Computed eigenvectors (optional)
+ pub V: Option>,
+
+ // BLAS workspace (allocated vecs only)
+ isuppz: Vec,
+ work: Vec,
+ iwork: Vec,
+}
+
+impl EigEngine
+where
+ T: FloatT,
+{
+ pub fn new(n: usize) -> Self {
+ let λ = vec![T::zero(); n];
+ let V = None;
+ let isuppz = vec![0; 2 * n];
+ let work = vec![T::one()];
+ let iwork = vec![1];
+ Self {
+ λ,
+ V,
+ isuppz,
+ work,
+ iwork,
+ }
+ }
+}
+
+impl FactorEigen for EigEngine
+where
+ T: FloatT,
+{
+ type T = T;
+ fn eigvals(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError> {
+ self.syevr(A, b'N')
+ }
+ fn eigen(&mut self, A: &mut Matrix) -> Result<(), DenseFactorizationError> {
+ self.syevr(A, b'V')
+ }
+}
+
+impl EigEngine
+where
+ T: FloatT,
+{
+ fn syevr(&mut self, A: &mut Matrix, jobz: u8) -> Result<(), DenseFactorizationError> {
+ if !A.is_square() || A.nrows() != self.λ.len() {
+ return Err(DenseFactorizationError::IncompatibleDimension);
+ }
+ let An = A.nrows();
+
+ // allocate for eigenvectors on first request
+ if jobz == b'V' && self.V.is_none() {
+ self.V = Some(Matrix::<T>::zeros((An, An)));
+ }
+
+ // target for computed eigenvectors (if any)
+ let mut Vfake = [T::zero()];
+ let Vdata = match self.V.as_mut() {
+ Some(V) => V.data_mut(),
+ None => &mut Vfake,
+ };
+
+ // standard BLAS ?syevr arguments for computing
+ // a full set of eigenvalues.
+ let jobz = jobz; // 'N' for values, 'V' for vecs/values
+ let range = b'A'; // compute all eigenvalues
+ let uplo = MatrixTriangle::Triu.as_blas_char(); // we always assume triu form
+ let n = An.try_into().unwrap();
+ let a = A.data_mut();
+ let lda = n;
+ let vl = T::zero(); // eig value lb (range = A => not used)
+ let vu = T::zero(); // eig value ub (range = A => not used)
+ let il = 0_i32; // eig interval lb (range = A => not used)
+ let iu = 0_i32; // eig interval ub (range = A => not used)
+ let abstol = -T::one(); // forces default tolerance
+ let m = &mut 0_i32; // returns # of computed eigenvalues
+ let w = &mut self.λ; // eigenvalues go here
+ let z = Vdata; // vectors go here
+ let ldz = n; // leading dim of eigenvector matrix
+ let isuppz = &mut self.isuppz;
+ let work = &mut self.work;
+ let mut lwork = -1_i32; // -1 => config to request required work size
+ let iwork = &mut self.iwork;
+ let mut liwork = -1_i32; // -1 => config to request required work size
+ let info = &mut 0_i32; // output info
+
+ for i in 0..2 {
+ T::xsyevr(
+ jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work,
+ lwork, iwork, liwork, info,
+ );
+ if *info != 0 {
+ return Err(DenseFactorizationError::Eigen(*info));
+ }
+ // resize work vectors and reset lengths
+ if i == 0 {
+ lwork = work[0].to_i32().unwrap();
+ liwork = iwork[0];
+ work.resize(lwork as usize, T::zero());
+ iwork.resize(liwork as usize, 0);
+ }
+ }
+ Ok(())
+ }
+}
+
+#[test]
+fn test_eigen() {
+ use crate::algebra::{DenseMatrix, MultiplyGEMM, VectorMath};
+
+ let mut S = Matrix::new((3, 3), vec![3., 2., 4., 2., 0., 2., 4., 2., 3.]);
+ let Scopy = S.clone(); //S is corrupted after factorization
+
+ let mut eng = EigEngine::<f64>::new(3);
+ assert!(eng.eigvals(&mut S).is_ok());
+ let sol = [-1.0, -1.0, 8.];
+ assert!(eng.λ.norm_inf_diff(&sol) < 1e-6);
+
+ let mut S = Scopy.clone(); //S is corrupted after factorization
+ assert!(eng.eigen(&mut S).is_ok());
+ let λ = &eng.λ;
+ let mut M = Matrix::<f64>::zeros((3, 3));
+ let V = eng.V.unwrap();
+ let mut Vs = V.clone();
+ for c in 0..3 {
+ for r in 0..3 {
+ Vs[(r, c)] *= λ[c];
+ }
+ }
+
+ M.mul(&Vs, &V.t(), 1.0, 0.0);
+ assert!(M.data().norm_inf_diff(Scopy.data()) < 1e-8);
+}
diff --git a/src/algebra/dense/symv.rs b/src/algebra/dense/symv.rs
new file mode 100644
index 00000000..b9407818
--- /dev/null
+++ b/src/algebra/dense/symv.rs
@@ -0,0 +1,39 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ DenseMatrix, FloatT, Matrix, MatrixTriangle, MultiplySYMV, ShapedMatrix, Symmetric,
+};
+
+impl<'a, T> MultiplySYMV for Symmetric<'a, Matrix>
+where
+ T: FloatT,
+{
+ type T = T;
+ // implements y = αA*x + βy
+ fn symv(&self, x: &[Self::T], y: &mut [Self::T], α: Self::T, β: Self::T) {
+ let (m, n) = self.size();
+ assert!(m == n);
+
+ // standard BLAS ?symv arguments for computing matrix-vector product
+ let uplo = MatrixTriangle::Triu.as_blas_char();
+ let n = n.try_into().unwrap();
+ let a = self.src.data();
+ let lda = n;
+ let incx = 1;
+ let incy = 1;
+ T::xsymv(uplo, n, α, a, lda, x, incx, β, y, incy);
+ }
+}
+
+#[test]
+fn test_gsymv() {
+ use crate::algebra::Matrix;
+ let (m, n) = (3, 3);
+ let a = vec![1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0];
+ let A = Matrix::new((m, n), a);
+
+ let x = vec![1., -2., 3.];
+ let mut y = vec![-4., -1., 3.];
+ A.sym().symv(&x, &mut y, 2.0, 3.0);
+ assert!(y == [6.0, 19.0, 33.0]);
+}
diff --git a/src/algebra/dense/syr2k.rs b/src/algebra/dense/syr2k.rs
new file mode 100644
index 00000000..85d361bb
--- /dev/null
+++ b/src/algebra/dense/syr2k.rs
@@ -0,0 +1,65 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ DenseMatrix, FloatT, Matrix, MatrixShape, MatrixTriangle, MultiplySYR2K, ShapedMatrix,
+};
+
+impl MultiplySYR2K for Matrix
+where
+ T: FloatT,
+{
+ type T = T;
+
+ // implements self = C = α(A*B' + B*A') + βC
+ fn syr2k(&mut self, A: &Matrix, B: &Matrix, α: T, β: T) -> &Self {
+ assert!(self.nrows() == A.nrows());
+ assert!(self.nrows() == B.nrows());
+ assert!(self.ncols() == B.nrows());
+ assert!(A.ncols() == B.ncols());
+
+ if self.nrows() == 0 {
+ return self;
+ }
+
+ let n = self.nrows().try_into().unwrap();
+ let k = A.ncols().try_into().unwrap();
+ let a = A.data();
+ let lda = n;
+ let b = B.data();
+ let ldb = n;
+ let c = self.data_mut();
+ let ldc = n;
+
+ T::xsyr2k(
+ MatrixTriangle::Triu.as_blas_char(),
+ MatrixShape::N.as_blas_char(),
+ n,
+ k,
+ α,
+ a,
+ lda,
+ b,
+ ldb,
+ β,
+ c,
+ ldc,
+ );
+ self
+ }
+}
+
+#[test]
+fn test_syr2k() {
+ let (m, n) = (3, 2);
+ let a = vec![1., -4., 2., -5., 3., 6.];
+ let A = Matrix::new((m, n), a);
+ let b = vec![4., 2., -3., 5., -2., -2.];
+ let B = Matrix::new((m, n), b);
+
+ let mut C = Matrix::<f64>::identity(m);
+
+ //NB: modifies upper triangle only
+ C.syr2k(&A, &B, 2., 1.);
+
+ assert!(C.data() == [-83., 0., 0., 22., -55., 0., 90., -4., -71.]);
+}
diff --git a/src/algebra/dense/syrk.rs b/src/algebra/dense/syrk.rs
new file mode 100644
index 00000000..2e0eea05
--- /dev/null
+++ b/src/algebra/dense/syrk.rs
@@ -0,0 +1,64 @@
+#![allow(non_snake_case)]
+
+use crate::algebra::{
+ DenseMatrix, FloatT, Matrix, MatrixShape, MatrixTriangle, MultiplySYRK, ShapedMatrix,
+};
+
+impl MultiplySYRK for Matrix
+where
+ T: FloatT,
+{
+ type T = T;
+
+ // implements self = C = αA*A' + βC
+ fn syrk(&mut self, A: &MATA, α: T, β: T) -> &Self
+ where
+ MATA: DenseMatrix,
+ {
+ assert!(self.nrows() == A.nrows());
+ assert!(self.ncols() == A.nrows());
+
+ if self.nrows() == 0 {
+ return self;
+ }
+
+ let uplo = MatrixTriangle::Triu.as_blas_char();
+ let transA = A.shape().as_blas_char();
+ let n = A.nrows().try_into().unwrap();
+ let k = A.ncols().try_into().unwrap();
+ let lda = if A.shape() == MatrixShape::N { n } else { k };
+ let ldc = n;
+
+ T::xsyrk(
+ uplo,
+ transA,
+ n,
+ k,
+ α,
+ A.data(),
+ lda,
+ β,
+ self.data_mut(),
+ ldc,
+ );
+ self
+ }
+}
+
+#[test]
+fn test_syrk() {
+ let (m, n) = (2, 3);
+ let A = Matrix::new((m, n), vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0]);
+
+ let mut AAt = Matrix::<f64>::zeros((m, m));
+ AAt.syrk(&A, 1.0, 0.0);
+ //NB: fills upper triangle only
+ assert!(AAt.data() == [14.0, 0.0, 32.0, 77.0]);
+
+ let mut AtA = Matrix::<f64>::zeros((n, n));
+ AtA.data_mut().fill(1.0);
+ AtA.syrk(&A.t(), 2.0, 1.0);
+ //NB: fills upper triangle only
+ println!("AtA = {}", AtA);
+ assert!(AtA.data() == [35.0, 1.0, 1.0, 45.0, 59.0, 1.0, 55.0, 73.0, 91.0]);
+}
diff --git a/src/algebra/matrix_dense_3.rs b/src/algebra/densesym3x3/mod.rs
similarity index 91%
rename from src/algebra/matrix_dense_3.rs
rename to src/algebra/densesym3x3/mod.rs
index 5315589e..449c47fd 100644
--- a/src/algebra/matrix_dense_3.rs
+++ b/src/algebra/densesym3x3/mod.rs
@@ -1,15 +1,12 @@
#![allow(non_snake_case)]
-use super::{FloatT, VectorMath};
+use super::FloatT;
use crate::algebra::*;
use std::ops::{Index, IndexMut};
-// Dense matrix types are restricted to the crate
-// NB: Implements a basic symmetric 3x3 container
-// to support power and exponential cones.
-// For PSDs we will want something more general that
-// is compatible (interchangeably) with nalgebra /
-// ndarray / some other blas like interface
+// 3x3 Dense matrix types are restricted to the crate
+// NB: Implements a symmetric 3x3 type to support
+// power and exponential cones.
//
// Data is stored as an array of 6 values belonging
// the upper triangle of a 3x3 matrix. Lower triangle
@@ -49,7 +46,7 @@ where
pub fn mul(&self, y: &mut [T], x: &[T]) {
let H = self;
- //matrix is packed 3x3, so unroll it here
+ //matrix is packed triu of a 3x3, so unroll it here
y[0] = (H.data[0] * x[0]) + (H.data[1] * x[1]) + (H.data[3] * x[2]);
y[1] = (H.data[1] * x[0]) + (H.data[2] * x[1]) + (H.data[4] * x[2]);
y[2] = (H.data[3] * x[0]) + (H.data[4] * x[1]) + (H.data[5] * x[2]);
@@ -64,7 +61,7 @@ where
pub fn norm_fro(&self) -> T {
let d = self.data;
//Frobenius norm. Need to be careful to count
- //off diagonals twice
+ //the packed off diagonals twice
let mut sumsq = T::zero();
//diagonal terms
@@ -87,7 +84,7 @@ where
}
pub fn copy_from(&mut self, src: &Self) {
- self.data.copy_from(&src.data);
+ self.data.copy_from_slice(&src.data);
}
//convert row col coordinate to triu index
@@ -95,9 +92,9 @@ where
pub fn index_linear(idx: (usize, usize)) -> usize {
let (r, c) = idx;
if r < c {
- r + ((c * (c + 1)) >> 1)
+ r + triangular_number(c)
} else {
- c + ((r * (r + 1)) >> 1)
+ c + triangular_number(r)
}
}
@@ -190,7 +187,7 @@ fn test_3x3_matrix_index() {
let data = [1., 2., 3., 4., 5., 6.];
assert!(
- H.data.iter().zip(data.iter()).all(|(a, b)| a == b),
+ std::iter::zip(H.data, data).all(|(a, b)| a == b),
"Arrays are not equal"
);
diff --git a/src/algebra/error_types.rs b/src/algebra/error_types.rs
new file mode 100644
index 00000000..1bcc6835
--- /dev/null
+++ b/src/algebra/error_types.rs
@@ -0,0 +1,28 @@
+use thiserror::Error;
+
+/// Error type returned by the [`check_format`](crate::algebra::CscMatrix::check_format) utility.
+#[derive(Error, Debug)]
+pub enum SparseFormatError {
+ #[error("Matrix dimension fields and/or array lengths are incompatible")]
+ IncompatibleDimension,
+ #[error("Data is not sorted by row index within each column")]
+ BadRowOrdering,
+ #[error("Row value exceeds the matrix row dimension")]
+ BadRowval,
+ #[error("Bad column pointer values")]
+ BadColptr,
+}
+
+/// Error type returned by BLAS-like dense factorization routines. Errors
+/// return the internal BLAS error codes.
+#[derive(Error, Debug)]
+pub enum DenseFactorizationError {
+ #[error("Matrix dimension fields and/or array lengths are incompatible")]
+ IncompatibleDimension,
+ #[error("Eigendecomposition error")]
+ Eigen(i32),
+ #[error("SVD error")]
+ SVD(i32),
+ #[error("Cholesky error")]
+ Cholesky(i32),
+}
diff --git a/src/algebra/floats.rs b/src/algebra/floats.rs
index 7a34fe7b..5156f489 100644
--- a/src/algebra/floats.rs
+++ b/src/algebra/floats.rs
@@ -1,16 +1,15 @@
use num_traits::{Float, FloatConst, FromPrimitive, NumAssign};
+use std::fmt::{Debug, Display, LowerExp};
-/// Trait for floating point types used in the Clarabel solver.
-///
-/// All floating point calculations in Clarabel are represented internally on values
-/// implementing the `FloatT` trait, with implementations provided for f32 and f64
-/// native types. It should be possible to compile Clarabel to support any any other
-/// floating point type provided that it satisfies the trait bounds of
-/// `FloatT`.
-///
-/// `FloatT` relies on [`num_traits`](num_traits) for most of its constituent trait bounds.
+#[cfg(feature = "sdp")]
+use crate::algebra::dense::BlasFloatT;
-pub trait FloatT:
+/// Core traits for internal floating point values.
+///
+/// This trait defines a subset of bounds for `FloatT`, which is preferred
+/// throughout for use in the solver. When the "sdp" feature is enabled,
+/// `FloatT` is additionally restricted to f32/f64 types supported by BLAS.
+pub trait CoreFloatT:
'static
+ Send
+ Float
@@ -18,13 +17,56 @@ pub trait FloatT:
+ NumAssign
+ Default
+ FromPrimitive
- + std::fmt::Display
- + std::fmt::LowerExp
- + std::fmt::Debug
+ + Display
+ + LowerExp
+ + Debug
+ + Sized
+{
+}
+
+impl CoreFloatT for T where
+ T: 'static
+ + Send
+ + Float
+ + FloatConst
+ + NumAssign
+ + Default
+ + FromPrimitive
+ + Display
+ + LowerExp
+ + Debug
+ + Sized
{
}
-impl FloatT for f32 {}
-impl FloatT for f64 {}
+
+// if "sdp" is enabled, we must add an additional trait
+// trait bound to restrict compilation for f32/f64 types
+// since there is no BLAS support otherwise
+
+cfg_if::cfg_if! {
+ if #[cfg(not(feature="sdp"))] {
+ /// Main trait for floating point types used in the Clarabel solver.
+ ///
+ /// All floating point calculations in Clarabel are represented internally on values
+ /// implementing the `FloatT` trait, with implementations provided only for f32 and f64
+ /// native types when compiled with BLAS/LAPACK support for SDPs. If SDP support is not
+ /// enabled then it should be possible to compile Clarabel to support any other
+ /// floating point type provided that it satisfies the trait bounds of `CoreFloatT`.
+ ///
+ /// `FloatT` relies on [`num_traits`](num_traits) for most of its constituent trait bounds.
+ pub trait FloatT: CoreFloatT {}
+ } else{
+ pub trait FloatT: CoreFloatT + BlasFloatT {}
+ }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(feature="sdp")] {
+ impl FloatT for T where T: CoreFloatT + BlasFloatT {}
+ } else{
+ impl FloatT for T where T: CoreFloatT {}
+ }
+}
/// Trait for convering Rust primitives to [`FloatT`](crate::algebra::FloatT)
///
@@ -43,7 +85,7 @@ pub trait AsFloatT: 'static {
fn as_T(&self) -> T;
}
-macro_rules! impl_as_T {
+macro_rules! impl_as_FloatT {
($ty:ty, $ident:ident) => {
impl AsFloatT for $ty
where
@@ -56,8 +98,8 @@ macro_rules! impl_as_T {
}
};
}
-impl_as_T!(u32, from_u32);
-impl_as_T!(u64, from_u64);
-impl_as_T!(usize, from_usize);
-impl_as_T!(f32, from_f32);
-impl_as_T!(f64, from_f64);
+impl_as_FloatT!(u32, from_u32);
+impl_as_FloatT!(u64, from_u64);
+impl_as_FloatT!(usize, from_usize);
+impl_as_FloatT!(f32, from_f32);
+impl_as_FloatT!(f64, from_f64);
diff --git a/src/algebra/traits.rs b/src/algebra/math_traits.rs
similarity index 59%
rename from src/algebra/traits.rs
rename to src/algebra/math_traits.rs
index f35e9ce7..95e4a00e 100644
--- a/src/algebra/traits.rs
+++ b/src/algebra/math_traits.rs
@@ -1,4 +1,4 @@
-use super::{MatrixShape, MatrixTriangle};
+use super::FloatT;
// All internal math for all solver implementations should go
// through these core traits, which are implemented generically
@@ -6,44 +6,53 @@ use super::{MatrixShape, MatrixTriangle};
/// Scalar operations on [`FloatT`](crate::algebra::FloatT)
-pub trait ScalarMath {
+pub trait ScalarMath {
+ type T: FloatT;
/// Applies a threshold value.
///
/// If `s < min_thresh`, it is assigned the new value `min_new`.
///
/// If `s > max_thresh`, it assigned the new value `max_new`.
- fn clip(&self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> T;
+ fn clip(
+ &self,
+ min_thresh: Self::T,
+ max_thresh: Self::T,
+ min_new: Self::T,
+ max_new: Self::T,
+ ) -> Self::T;
/// Safe calculation for log barriers.
///
/// Returns log(s) if s > 0 -Infinity otherwise.
- fn logsafe(&self) -> T;
+ fn logsafe(&self) -> Self::T;
}
/// Vector operations on slices of [`FloatT`](crate::algebra::FloatT)
-pub trait VectorMath {
+pub trait VectorMath {
+ type T;
+
/// Copy values from `src` to `self`
fn copy_from(&mut self, src: &Self) -> &mut Self;
/// Make a new vector from a subset of elements
- fn select(&self, index: &[bool]) -> Vec;
+ fn select(&self, index: &[bool]) -> Vec;
/// Apply an elementwise operation on a vector.
- fn scalarop(&mut self, op: impl Fn(T) -> T) -> &mut Self;
+ fn scalarop(&mut self, op: impl Fn(Self::T) -> Self::T) -> &mut Self;
/// Apply an elementwise operation to `v` and assign the
/// results to `self`.
- fn scalarop_from(&mut self, op: impl Fn(T) -> T, v: &Self) -> &mut Self;
+ fn scalarop_from(&mut self, op: impl Fn(Self::T) -> Self::T, v: &Self) -> &mut Self;
/// Elementwise translation.
- fn translate(&mut self, c: T) -> &mut Self;
+ fn translate(&mut self, c: Self::T) -> &mut Self;
/// set all elements to the same value
- fn set(&mut self, c: T) -> &mut Self;
+ fn set(&mut self, c: Self::T) -> &mut Self;
/// Elementwise scaling.
- fn scale(&mut self, c: T) -> &mut Self;
+ fn scale(&mut self, c: Self::T) -> &mut Self;
/// Elementwise reciprocal.
fn recip(&mut self) -> &mut Self;
@@ -61,43 +70,57 @@ pub trait VectorMath {
fn hadamard(&mut self, y: &Self) -> &mut Self;
/// Vector version of [clip](crate::algebra::ScalarMath::clip)
- fn clip(&mut self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> &mut Self;
+ fn clip(
+ &mut self,
+ min_thresh: Self::T,
+ max_thresh: Self::T,
+ min_new: Self::T,
+ max_new: Self::T,
+ ) -> &mut Self;
/// Normalize, returning the norm. Do nothing if norm == 0.
- fn normalize(&mut self) -> T;
+ fn normalize(&mut self) -> Self::T;
/// Dot product
- fn dot(&self, y: &Self) -> T;
+ fn dot(&self, y: &Self) -> Self::T;
// computes dot(z + αdz,s + αds) without intermediate allocation
- fn dot_shifted(z: &[T], s: &[T], dz: &[T], ds: &[T], α: T) -> T;
+ fn dot_shifted(
+ z: &[Self::T],
+ s: &[Self::T],
+ dz: &[Self::T],
+ ds: &[Self::T],
+ α: Self::T,
+ ) -> Self::T;
/// Standard Euclidian or 2-norm distance from `self` to `y`
- fn dist(&self, y: &Self) -> T;
+ fn dist(&self, y: &Self) -> Self::T;
/// Sum of elements squared.
- fn sumsq(&self) -> T;
+ fn sumsq(&self) -> Self::T;
/// 2-norm
- fn norm(&self) -> T;
+ fn norm(&self) -> Self::T;
/// 2-norm of an elementwise scaling of `self` by `v`
- fn norm_scaled(&self, v: &Self) -> T;
+ fn norm_scaled(&self, v: &Self) -> Self::T;
/// Infinity norm
- fn norm_inf(&self) -> T;
+ fn norm_inf(&self) -> Self::T;
/// One norm
- fn norm_one(&self) -> T;
+ fn norm_one(&self) -> Self::T;
+
+ fn norm_inf_diff(&self, b: &Self) -> Self::T;
/// Minimum value in vector
- fn minimum(&self) -> T;
+ fn minimum(&self) -> Self::T;
/// Maximum value in vector
- fn maximum(&self) -> T;
+ fn maximum(&self) -> Self::T;
/// Mean value in vector
- fn mean(&self) -> T;
+ fn mean(&self) -> Self::T;
/// Checks if all elements are finite, i.e. no Infs or NaNs
fn is_finite(&self) -> bool;
@@ -106,69 +129,82 @@ pub trait VectorMath {
//--------------------
/// BLAS-like shift and scale in place. Produces `self = a*x+b*self`
- fn axpby(&mut self, a: T, x: &Self, b: T) -> &mut Self;
+ fn axpby(&mut self, a: Self::T, x: &Self, b: Self::T) -> &mut Self;
/// BLAS-like shift and scale, non in-place version. Produces `self = a*x+b*y`
- fn waxpby(&mut self, a: T, x: &Self, b: T, y: &Self) -> &mut Self;
+ fn waxpby(&mut self, a: Self::T, x: &Self, b: Self::T, y: &Self) -> &mut Self;
}
/// Matrix operations for matrices of [`FloatT`](crate::algebra::FloatT)
-pub trait MatrixMath {
+pub(crate) trait MatrixVectorMultiply {
+ type T: FloatT;
+
+ /// BLAS-like general matrix-vector multiply. Produces `y = a*self*x + b*y`
+ fn gemv(&self, y: &mut [Self::T], x: &[Self::T], a: Self::T, b: Self::T);
+}
+
+pub(crate) trait SymMatrixVectorMultiply {
+ type T: FloatT;
+
+ /// BLAS-like symmetric matrix-vector multiply. Produces `y = a*self*x + b*y`.
+ /// The matrix source data should be triu.
+ fn symv(&self, y: &mut [Self::T], x: &[Self::T], a: Self::T, b: Self::T);
+}
+
+/// Operations on matrices of [`FloatT`](crate::algebra::FloatT)
+
+pub trait MatrixMath {
+ type T: FloatT;
+
/// Compute columnwise infinity norm operations on
/// a matrix and assign the results to the vector `norms`
- fn col_norms(&self, norms: &mut V);
+ fn col_norms(&self, norms: &mut [Self::T]);
/// Compute columnwise infinity norm operations on
/// a matrix and assign the results to the vector `norms`.
/// In the `no_reset` version of this function, if `norms[i]`
/// is already larger the norm of the $i^{th}$ columns, then
/// its value is not changed
- fn col_norms_no_reset(&self, norms: &mut V);
+ fn col_norms_no_reset(&self, norms: &mut [Self::T]);
/// Compute columnwise infinity norm operations on
/// a symmstric matrix
- fn col_norms_sym(&self, norms: &mut V);
+ fn col_norms_sym(&self, norms: &mut [Self::T]);
/// Compute columnwise infinity norm operations on
/// a symmetric matrix without reset
- fn col_norms_sym_no_reset(&self, norms: &mut V);
+ fn col_norms_sym_no_reset(&self, norms: &mut [Self::T]);
/// Compute rowwise infinity norm operations on
/// a matrix and assign the results to the vector `norms`
- fn row_norms(&self, norms: &mut V);
+ fn row_norms(&self, norms: &mut [Self::T]);
/// Compute rowwise infinity norm operations on
/// a matrix and assign the results to the vector `norms`
/// without reset
- fn row_norms_no_reset(&self, norms: &mut V);
+ fn row_norms_no_reset(&self, norms: &mut [Self::T]);
/// Elementwise scaling
- fn scale(&mut self, c: T);
+ fn scale(&mut self, c: Self::T);
/// Elementwise negation
fn negate(&mut self);
/// Left multiply the matrix `self` by `Diagonal(l)`
- fn lscale(&mut self, l: &V);
+ fn lscale(&mut self, l: &[Self::T]);
/// Right multiply the matrix self by `Diagonal(r)`
- fn rscale(&mut self, r: &V);
+ fn rscale(&mut self, r: &[Self::T]);
/// Left and multiply the matrix self by diagonal matrices,
/// producing `A = Diagonal(l)*A*Diagonal(r)`
- fn lrscale(&mut self, l: &V, r: &V);
-
- /// BLAS-like general matrix-vector multiply. Produces `y = a*self*x + b*y`
- fn gemv(&self, y: &mut V, trans: MatrixShape, x: &V, a: T, b: T);
-
- /// BLAS-like symmetric matrix-vector multiply. Produces `y = a*self*x + b*y`.
- /// The matrix should be in either triu or tril form, with the other
- /// half of the triangle assumed symmetric
- fn symv(&self, y: &mut [T], tri: MatrixTriangle, x: &[T], a: T, b: T);
+ fn lrscale(&mut self, l: &[Self::T], r: &[Self::T]);
/// Quadratic form for a symmetric matrix. Assumes that the
/// matrix `M = self` is in upper triangular form, and produces
/// `y^T*M*x`
- fn quad_form(&self, y: &[T], x: &[T]) -> T;
+ ///
+ /// PJG: Maybe this should be on symmetric only.
+ fn quad_form(&self, y: &[Self::T], x: &[Self::T]) -> Self::T;
}
diff --git a/src/algebra/matrix_traits.rs b/src/algebra/matrix_traits.rs
new file mode 100644
index 00000000..f3de10d3
--- /dev/null
+++ b/src/algebra/matrix_traits.rs
@@ -0,0 +1,52 @@
+#![allow(non_snake_case)]
+
+use std::ops::Index;
+
+use crate::algebra::MatrixShape;
+
+pub(crate) trait ShapedMatrix {
+ fn nrows(&self) -> usize;
+ fn ncols(&self) -> usize;
+ fn shape(&self) -> MatrixShape;
+ fn size(&self) -> (usize, usize) {
+ (self.nrows(), self.ncols())
+ }
+ fn is_square(&self) -> bool {
+ self.nrows() == self.ncols()
+ }
+}
+
+//NB: the concrete dense type is just called "Matrix". The "DenseMatrix" trait
+//is implemented on Matrix, Adjoint and ReshapedMatrix to allow for indexing
+//of values in any of those format. This follows the Julia naming convention
+//for similar types.
+pub(crate) trait DenseMatrix: ShapedMatrix + Index<(usize, usize)> {
+ type T;
+ fn index_linear(&self, idx: (usize, usize)) -> usize;
+ fn data(&self) -> &[Self::T];
+}
+
+/// Blockwise matrix concatenation
+pub trait BlockConcatenate {
+ /// horizontal matrix concatenation
+ ///
+ /// ```text
+ /// C = [A B]
+ /// ```
+ /// # Panics
+ /// Panics if row dimensions are incompatible
+
+ fn hcat(A: &Self, B: &Self) -> Self;
+
+ /// vertical matrix concatenation
+ ///
+ /// ```text
+ /// C = [ A ]
+ /// [ B ]
+ /// ```
+ ///
+ /// # Panics
+ /// Panics if column dimensions are incompatible
+
+ fn vcat(A: &Self, B: &Self) -> Self;
+}
diff --git a/src/algebra/matrix_types.rs b/src/algebra/matrix_types.rs
index bd01a0e9..f0242fa7 100644
--- a/src/algebra/matrix_types.rs
+++ b/src/algebra/matrix_types.rs
@@ -2,52 +2,35 @@
// solver and math implementations are in standard
// compressed sparse column format, as is the API.
-/// Sparse matrix in standard Compressed Sparse Column (CSC) format
-///
-/// __Example usage__ : To construct the 3 x 2 matrix
-/// ```text
-/// A = [1. 3. 5.]
-/// [2. 0. 6.]
-/// [0. 4. 7.]
-/// ```
-///
-/// ```
-/// # use clarabel::algebra::CscMatrix;
-///
-/// let A : CscMatrix = CscMatrix::new(
-/// 3, // m
-/// 3, // n
-/// vec![0, 2, 4, 7], //colptr
-/// vec![0, 1, 0, 2, 0, 1, 2], //rowval
-/// vec![1., 2., 3., 4., 5., 6., 7.], //nzval
-/// );
-///
-/// // optional correctness check
-/// assert!(A.check_format().is_ok());
-///
-/// ```
-///
+/// Matrix shape marker for triangular matrices
+#[derive(PartialEq, Eq, Copy, Clone)]
+pub enum MatrixTriangle {
+ /// Upper triangular matrix
+ Triu,
+ /// Lower triangular matrix
+ Tril,
+}
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct CscMatrix<T = f64> {
- /// number of rows
- pub m: usize,
- /// number of columns
- pub n: usize,
- /// CSC format column pointer.
- ///
- /// Ths field should have length `n+1`. The last entry corresponds
- /// to the the number of nonzeros and should agree with the lengths
- /// of the `rowval` and `nzval` fields.
- pub colptr: Vec<usize>,
- /// vector of row indices
- pub rowval: Vec<usize>,
- /// vector of non-zero matrix elements
- pub nzval: Vec<T>,
+/// Matrix triangular form marker
+impl MatrixTriangle {
+ /// convert to u8 character for BLAS calls
+ pub fn as_blas_char(&self) -> u8 {
+ match self {
+ MatrixTriangle::Triu => b'U',
+ MatrixTriangle::Tril => b'L',
+ }
+ }
+ /// transpose
+ pub fn t(&self) -> Self {
+ match self {
+ MatrixTriangle::Triu => MatrixTriangle::Tril,
+ MatrixTriangle::Tril => MatrixTriangle::Triu,
+ }
+ }
}
/// Matrix orientation marker
-#[derive(PartialEq, Eq, Copy, Clone)]
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MatrixShape {
/// Normal matrix orientation
N,
@@ -55,11 +38,42 @@ pub enum MatrixShape {
T,
}
-/// Matrix shape marker for triangular matrices
-#[derive(PartialEq, Eq, Copy, Clone)]
-pub enum MatrixTriangle {
- /// Upper triangular matrix
- Triu,
- /// Lower triangular matrix
- Tril,
+impl MatrixShape {
+ /// convert to u8 character for BLAS calls
+ pub fn as_blas_char(&self) -> u8 {
+ match self {
+ MatrixShape::N => b'N',
+ MatrixShape::T => b'T',
+ }
+ }
+ /// transpose
+ pub fn t(&self) -> Self {
+ match self {
+ MatrixShape::N => MatrixShape::T,
+ MatrixShape::T => MatrixShape::N,
+ }
+ }
+}
+
+/// Adjoint of a matrix
+#[derive(Debug, Clone, PartialEq)]
+pub struct Adjoint<'a, M> {
+ pub src: &'a M,
+}
+/// Symmetric view of a matrix. Only the upper
+/// triangle of the source matrix will be referenced.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Symmetric<'a, M> {
+ pub src: &'a M,
+}
+
+/// Borrowed data slice reshaped into a matrix.
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) struct ReshapedMatrix<'a, T> {
+ /// number of rows
+ pub m: usize,
+ ///number of columns
+ pub n: usize,
+ ///borrowed data
+ pub data: &'a [T],
}
diff --git a/src/algebra/matrix_utils.rs b/src/algebra/matrix_utils.rs
deleted file mode 100644
index a118eaa1..00000000
--- a/src/algebra/matrix_utils.rs
+++ /dev/null
@@ -1,572 +0,0 @@
-#![allow(non_snake_case)]
-use super::{CscMatrix, FloatT, MatrixShape, MatrixTriangle};
-use thiserror::Error;
-
-/// Error type returned by the [`check_format`](crate::algebra::CscMatrix::check_format) utility.
-#[derive(Error, Debug)]
-pub enum SparseFormatError {
- #[error("Matrix dimension fields and/or array lengths are incompatible")]
- IncompatibleDimension,
- #[error("Data is not sorted by row index within each column")]
- BadRowOrdering,
- #[error("Row value exceeds the matrix row dimension")]
- BadRowval,
- #[error("Bad column pointer values")]
- BadColptr,
-}
-
-impl<T> CscMatrix<T>
-where
- T: FloatT,
-{
- /// `CscMatrix` constructor.
- ///
- /// # Panics
- /// Makes rudimentary dimensional compatibility checks and panics on
- /// failure. This constructor does __not__
- /// ensure that rows indices are all in bounds or that data is arranged
- /// such that entries within each column appear in order of increasing
- /// row index. Responsibility for ensuring these conditions hold
- /// is left to the caller.
- ///
-
- pub fn new(m: usize, n: usize, colptr: Vec<usize>, rowval: Vec<usize>, nzval: Vec<T>) -> Self {
- assert_eq!(rowval.len(), nzval.len());
- assert_eq!(colptr.len(), n + 1);
- assert_eq!(colptr[n], rowval.len());
- CscMatrix {
- m,
- n,
- colptr,
- rowval,
- nzval,
- }
- }
-
- /// number of rows
- pub fn nrows(&self) -> usize {
- self.m
- }
- /// number of columns
- pub fn ncols(&self) -> usize {
- self.n
- }
- /// matrix size as (nrows,ncols) pair
- pub fn size(&self) -> (usize, usize) {
- (self.m, self.n)
- }
- /// number of nonzeros
- pub fn nnz(&self) -> usize {
- self.colptr[self.n]
- }
- /// true if `self.nrows() == self.ncols()`
- pub fn is_square(&self) -> bool {
- self.m == self.n
- }
-
- /// Check that matrix data is correctly formatted.
- pub fn check_format(&self) -> Result<(), SparseFormatError> {
- if self.rowval.len() != self.nzval.len() {
- return Err(SparseFormatError::IncompatibleDimension);
- }
-
- if self.colptr.is_empty()
- || (self.colptr.len() - 1) != self.n
- || self.colptr[self.n] != self.rowval.len()
- {
- return Err(SparseFormatError::IncompatibleDimension);
- }
-
- //check for colptr monotonicity
- if self.colptr.windows(2).any(|c| c[0] > c[1]) {
- return Err(SparseFormatError::BadColptr);
- }
-
- //check for rowval monotonicity within each column
- for col in 0..self.n {
- let rng = self.colptr[col]..self.colptr[col + 1];
- if self.rowval[rng].windows(2).any(|c| c[0] >= c[1]) {
- return Err(SparseFormatError::BadRowval);
- }
- }
- //check for row values out of bounds
- if !self.rowval.iter().all(|r| r < &self.m) {
- return Err(SparseFormatError::BadRowval);
- }
-
- Ok(())
- }
-
- /// allocate space for a sparse matrix with `nnz` elements
- ///
- /// To make an m x n matrix of zeros, use
- /// ```
- /// # use clarabel::algebra::CscMatrix;
- /// # let m = 3;
- /// # let n = 4;
- /// let A : CscMatrix = CscMatrix::spalloc(m,n,0);
- /// ```
-
- pub fn spalloc(m: usize, n: usize, nnz: usize) -> Self {
- let mut colptr = vec![0; n + 1];
- let rowval = vec![0; nnz];
- let nzval = vec![T::zero(); nnz];
- colptr[n] = nnz;
-
- CscMatrix::new(m, n, colptr, rowval, nzval)
- }
-
- /// Identity matrix of size `n`
- pub fn identity(n: usize) -> Self {
- let colptr = (0usize..=n).collect();
- let rowval = (0usize..n).collect();
- let nzval = vec![T::one(); n];
-
- CscMatrix::new(n, n, colptr, rowval, nzval)
- }
-
- /// horizontal matrix concatenation:
- /// ```text
- /// C = [A B]
- /// ```
- /// # Panics
- /// Panics if row dimensions are incompatible
-
- pub fn hcat(A: &Self, B: &Self) -> Self {
- //first check for compatible row dimensions
- assert_eq!(A.m, B.m);
-
- //dimensions for C = [A B];
- let nnz = A.nnz() + B.nnz();
- let m = A.m; //rows
- let n = A.n + B.n; //cols s
- let mut C = CscMatrix::spalloc(m, n, nnz);
-
- //we make dummy mapping indices since we don't care
- //where the entries go. An alternative would be to
- //modify the fill_block method to accept Option<_>
- let mut amap = vec![0usize; A.nnz()];
- let mut bmap = vec![0usize; B.nnz()];
-
- //compute column counts and fill
- C.colcount_block(A, 0, MatrixShape::N);
- C.colcount_block(B, A.n, MatrixShape::N);
- C.colcount_to_colptr();
-
- C.fill_block(A, &mut amap, 0, 0, MatrixShape::N);
- C.fill_block(B, &mut bmap, 0, A.n, MatrixShape::N);
- C.backshift_colptrs();
-
- C
- }
-
- /// vertical matrix concatenation:
- /// ```text
- /// C = [ A ]
- /// [ B ]
- /// ```
- ///
- /// # Panics
- /// Panics if column dimensions are incompatible
-
- pub fn vcat(A: &Self, B: &Self) -> Self {
- //first check for compatible column dimensions
- assert_eq!(A.n, B.n);
-
- //dimensions for C = [A; B];
- let nnz = A.nnz() + B.nnz();
- let m = A.m + B.m; //rows
- let n = A.n; //cols s
- let mut C = CscMatrix::spalloc(m, n, nnz);
-
- //we make dummy mapping indices since we don't care
- //where the entries go. An alternative would be to
- //modify the fill_block method to accept Option<_>
- let mut amap = vec![0usize; A.nnz()];
- let mut bmap = vec![0usize; B.nnz()];
-
- //compute column counts and fill
- C.colcount_block(A, 0, MatrixShape::N);
- C.colcount_block(B, 0, MatrixShape::N);
- C.colcount_to_colptr();
-
- C.fill_block(A, &mut amap, 0, 0, MatrixShape::N);
- C.fill_block(B, &mut bmap, A.m, 0, MatrixShape::N);
- C.backshift_colptrs();
- C
- }
-
- /// Select a subset of the rows of a sparse matrix
- ///
- /// # Panics
- /// Panics if row dimensions are incompatible
-
- pub fn select_rows(&self, rowidx: &Vec<bool>) -> Self {
- //first check for compatible row dimensions
- assert_eq!(rowidx.len(), self.m);
-
- //count the number of rows in the reduced matrix and build an
- //index from the logical rowidx to the reduced row number
- let mut rridx = vec![0; self.m];
- let mut mred = 0;
- for (r, is_used) in rridx.iter_mut().zip(rowidx) {
- if *is_used {
- *r = mred;
- mred += 1;
- }
- }
-
- // count the nonzeros in Ared
- let nzred = self.rowval.iter().filter(|&r| rowidx[*r]).count();
-
- // Allocate a reduced size A
- let mut Ared = CscMatrix::spalloc(mred, self.n, nzred);
-
- //populate new matrix
- let mut ptrred = 0;
- for col in 0..self.n {
- Ared.colptr[col] = ptrred;
- for ptr in self.colptr[col]..self.colptr[col + 1] {
- let thisrow = self.rowval[ptr];
- if rowidx[thisrow] {
- Ared.rowval[ptrred] = rridx[thisrow];
- Ared.nzval[ptrred] = self.nzval[ptr];
- ptrred += 1;
- }
- }
- Ared.colptr[Ared.n] = ptrred;
- }
-
- Ared
- }
-
- /// Allocates a new matrix containing only entries from the upper triangular part
-
- pub fn to_triu(&self) -> Self {
- assert_eq!(self.m, self.n);
- let (m, n) = (self.m, self.n);
- let mut colptr = vec![0; n + 1];
- let mut nnz = 0;
-
- //count the number of entries in the upper triangle
- //and place the totals into colptr
-
- for col in 0..n {
- //start / stop indices for the current column
- let first = self.colptr[col];
- let last = self.colptr[col + 1];
- let rows = &self.rowval[first..last];
-
- // number of entries on or above diagonal in this column,
- // shifted by 1 (i.e. colptr keeps a 0 in the first column)
- colptr[col + 1] = rows.iter().filter(|&row| *row <= col).count();
- nnz += colptr[col + 1];
- }
-
- //allocate and copy the upper triangle entries of
- //each column into the new value vector.
- //NB! : assumes that entries in each column have
- //monotonically increasing row numbers
- let mut rowval = vec![0; nnz];
- let mut nzval = vec![T::zero(); nnz];
-
- for col in 0..self.n {
- let ntriu = colptr[col + 1];
-
- //start / stop indices for the destination
- let fdest = colptr[col];
- let ldest = fdest + ntriu;
-
- //start / stop indices for the source
- let fsrc = self.colptr[col];
- let lsrc = fsrc + ntriu;
-
- //copy upper triangle values
- rowval[fdest..ldest].copy_from_slice(&self.rowval[fsrc..lsrc]);
- nzval[fdest..ldest].copy_from_slice(&self.nzval[fsrc..lsrc]);
-
- //this should now be cumsum of the counts
- colptr[col + 1] = ldest;
- }
- CscMatrix::new(m, n, colptr, rowval, nzval)
- }
-}
-
-//---------------------------------------------------------
-// The remainder of the CscMatrix implementation consists of
-// low-level utilities for counting / filling entries in
-// block partitioned sparse matrices. NB: this is still
-// impl CscMatrix, as above, but everything is
-// pub(crate)
-//---------------------------------------------------------
-
-impl<T> CscMatrix<T>
-where
- T: FloatT,
-{
- // increment self.colptr by the number of nonzeros
- // in a dense upper/lower triangle on the diagonal.
- pub(crate) fn colcount_dense_triangle(
- &mut self,
- initcol: usize,
- blockcols: usize,
- shape: MatrixTriangle,
- ) {
- let cols = self.colptr[(initcol)..(initcol + blockcols)].iter_mut();
- let counts = 1..(blockcols + 1);
- match shape {
- MatrixTriangle::Triu => {
- cols.zip(counts).for_each(|(x, c)| *x += c);
- }
-
- MatrixTriangle::Tril => {
- cols.zip(counts.rev()).for_each(|(x, c)| *x += c);
- }
- }
- }
-
- // increment the self.colptr by the number of nonzeros
- // in a square diagonal matrix placed on the diagonal.
- pub(crate) fn colcount_diag(&mut self, initcol: usize, blockcols: usize) {
- let cols = self.colptr[initcol..(initcol + blockcols)].iter_mut();
- cols.for_each(|x| *x += 1);
- }
-
- // same as kkt_count_diag, but counts places
- // where the input matrix M has a missing
- // diagonal entry. M must be square and TRIU
- pub(crate) fn colcount_missing_diag(&mut self, M: &CscMatrix<T>, initcol: usize) {
- assert_eq!(M.colptr.len(), M.n + 1);
- assert!(self.colptr.len() >= M.n + initcol);
-
- for i in 0..M.n {
- if M.colptr[i] == M.colptr[i+1] || // completely empty column
- M.rowval[M.colptr[i+1]-1] != i
- // last element is not on diagonal
- {
- self.colptr[i + initcol] += 1;
- }
- }
- }
-
- // increment the self.colptr by the a number of nonzeros.
- // used to account for the placement of a column
- // vector that partially populates the column
- pub(crate) fn colcount_colvec(&mut self, n: usize, _firstrow: usize, firstcol: usize) {
- // just add the vector length to this column
- self.colptr[firstcol] += n;
- }
-
- // increment the self.colptr by 1 for every element
- // used to account for the placement of a column
- // vector that partially populates the column
- pub(crate) fn colcount_rowvec(&mut self, n: usize, _firstrow: usize, firstcol: usize) {
- // add one element to each of n consective columns
- // starting from initcol. The row index doesn't
- // matter here.
- let cols = self.colptr[firstcol..(firstcol + n)].iter_mut();
- cols.for_each(|x| *x += 1);
- }
-
- // increment the self.colptr by the number of nonzeros in M
-
- pub(crate) fn colcount_block(&mut self, M: &CscMatrix<T>, initcol: usize, shape: MatrixShape) {
- match shape {
- MatrixShape::T => {
- for row in M.rowval.iter() {
- self.colptr[initcol + row] += 1;
- }
- }
- MatrixShape::N => {
- // just add the column count
- for i in 0..M.n {
- self.colptr[initcol + i] += M.colptr[i + 1] - M.colptr[i];
- }
- }
- }
- }
-
- //populate a partial column with zeros using the self.colptr as the indicator
- // the next fill location in each row.
- pub(crate) fn fill_colvec(&mut self, vtoKKT: &mut [usize], initrow: usize, initcol: usize) {
- for (i, v) in vtoKKT.iter_mut().enumerate() {
- let dest = self.colptr[initcol];
- self.rowval[dest] = initrow + i;
- self.nzval[dest] = T::zero();
- *v = dest;
- self.colptr[initcol] += 1;
- }
- }
-
- // populate a partial row with zeros using the self.colptr as indicator of
- // next fill location in each row.
- pub(crate) fn fill_rowvec(&mut self, vtoKKT: &mut [usize], initrow: usize, initcol: usize) {
- for (i, v) in vtoKKT.iter_mut().enumerate() {
- let col = initcol + i;
- let dest = self.colptr[col];
- self.rowval[dest] = initrow;
- self.nzval[dest] = T::zero();
- *v = dest;
- self.colptr[col] += 1;
- }
- }
-
- // populate values from M using the self.colptr as indicator of
- // next fill location in each row.
- pub(crate) fn fill_block(
- &mut self,
- M: &CscMatrix<T>,
- MtoKKT: &mut [usize],
- initrow: usize,
- initcol: usize,
- shape: MatrixShape,
- ) {
- for i in 0..M.n {
- let z = M.rowval.iter().zip(M.nzval.iter());
- let start = M.colptr[i];
- let stop = M.colptr[i + 1];
-
- for (j, (&Mrow, &Mval)) in z.take(stop).skip(start).enumerate() {
- let (col, row);
-
- match shape {
- MatrixShape::T => {
- col = Mrow + initcol;
- row = i + initrow;
- }
- MatrixShape::N => {
- col = i + initcol;
- row = Mrow + initrow;
- }
- };
-
- let dest = self.colptr[col];
- self.rowval[dest] = row;
- self.nzval[dest] = Mval;
- self.colptr[col] += 1;
- MtoKKT[j] = dest;
- }
- }
- }
-
- // Populate the upper or lower triangle with 0s using the self.colptr
- // as indicator of next fill location in each row
- pub(crate) fn fill_dense_triangle(
- &mut self,
- blocktoKKT: &mut [usize],
- offset: usize,
- blockdim: usize,
- shape: MatrixTriangle,
- ) {
- // data will always be supplied as triu, so when filling it into
- // a tril shape we also need to transpose it. Just write two
- // separate functions for clarity here
-
- match shape {
- MatrixTriangle::Triu => {
- self._fill_dense_triangle_triu(blocktoKKT, offset, blockdim);
- }
-
- MatrixTriangle::Tril => {
- self._fill_dense_triangle_tril(blocktoKKT, offset, blockdim);
- }
- }
- }
-
- pub(crate) fn _fill_dense_triangle_triu(
- &mut self,
- blocktoKKT: &mut [usize],
- offset: usize,
- blockdim: usize,
- ) {
- let mut kidx = 0;
- for col in offset..(offset + blockdim) {
- for row in offset..=col {
- let dest = self.colptr[col];
- self.rowval[dest] = row;
- self.nzval[dest] = T::zero(); //structural zero
- self.colptr[col] += 1;
- blocktoKKT[kidx] = dest;
- kidx += 1;
- }
- }
- }
-
- pub(crate) fn _fill_dense_triangle_tril(
- &mut self,
- blocktoKKT: &mut [usize],
- offset: usize,
- blockdim: usize,
- ) {
- let mut kidx = 0;
- for col in offset..(offset + blockdim) {
- for row in offset..=col {
- let dest = self.colptr[col];
- self.rowval[dest] = row;
- self.nzval[dest] = T::zero(); //structural zero
- self.colptr[col] += 1;
- blocktoKKT[kidx] = dest;
- kidx += 1;
- }
- }
- }
-
- // Populate the diagonal with 0s using the K.colptr as indicator of
- // next fill location in each row
- pub(crate) fn fill_diag(&mut self, diagtoKKT: &mut [usize], offset: usize, blockdim: usize) {
- for (i, col) in (offset..(offset + blockdim)).enumerate() {
- let dest = self.colptr[col];
- self.rowval[dest] = col;
- self.nzval[dest] = T::zero(); //structural zero
- self.colptr[col] += 1;
- diagtoKKT[i] = dest;
- }
- }
-
- // same as fill_diag, but only places zero
- // entries where the input matrix M has a missing
- // diagonal entry. M must be square and TRIU
- pub(crate) fn fill_missing_diag(&mut self, M: &CscMatrix<T>, initcol: usize) {
- for i in 0..M.n {
- // fill out missing diagonal terms only
- if M.colptr[i] == M.colptr[i+1] || // completely empty column
- M.rowval[M.colptr[i+1]-1] != i
- {
- // last element is not on diagonal
- let dest = self.colptr[i + initcol];
- self.rowval[dest] = i + initcol;
- self.nzval[dest] = T::zero(); //structural zero
- self.colptr[i] += 1;
- }
- }
- }
-
- pub(crate) fn colcount_to_colptr(&mut self) {
- let mut currentptr = 0;
- for p in &mut self.colptr {
- let count = *p;
- *p = currentptr;
- currentptr += count;
- }
- }
-
- pub(crate) fn backshift_colptrs(&mut self) {
- self.colptr.rotate_right(1);
- self.colptr[0] = 0;
- }
-
- pub(crate) fn count_diagonal_entries(&self) -> usize {
- let mut count = 0;
- for i in 0..self.n {
- // compare last entry in each column with
- // its row number to identify diagonal entries
- if self.colptr[i+1] != self.colptr[i] && // nonempty column
- self.rowval[self.colptr[i+1]-1] == i
- {
- // last element is on diagonal
- count += 1;
- }
- }
- count
- }
-}
diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index 60e54629..1d22fb09 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -1,6 +1,6 @@
//! Clarabel algebra module.
//!
-//! __NB__: Users will not ordinarily need to interact with this crate except for defining
+//! __NB__: Users will not ordinarily need to interact with this module except for defining
//! sparse matrix inputs in [`CscMatrix`](crate::algebra::CscMatrix) format.
//!
//! Clarabel comes with its own standalone implementation of all required internal algebraic operations implemented through the [`ScalarMath`](crate::algebra::VectorMath), [`VectorMath`](crate::algebra::VectorMath) and [`MatrixMath`](crate::algebra::MatrixMath) traits. Future versions may add implementations of these traits through external libraries as optional features.
@@ -11,22 +11,38 @@
// first import and flatten the solver's collection
// of core numeric types and matrix / vector traits.
+mod adjoint;
+mod error_types;
mod floats;
-mod matrix_dense_3;
+mod math_traits;
+mod matrix_traits;
mod matrix_types;
-mod matrix_utils;
-mod traits;
+mod reshaped;
+mod scalarmath;
+mod symmetric;
+mod vecmath;
+pub use adjoint::*;
+pub use error_types::*;
pub use floats::*;
-pub(crate) use matrix_dense_3::*;
+pub use math_traits::*;
+pub use matrix_traits::*;
pub use matrix_types::*;
-pub use matrix_utils::*;
-pub use traits::*;
+pub use reshaped::*;
+pub(crate) use scalarmath::*;
+pub use symmetric::*;
+pub use vecmath::*;
-// here we select the particular numeric implementation of
-// the core traits. For now, we only have the hand-written
-// one, so there is nothing to configure
-mod native;
-pub use native::*;
+// matrix implementations
+mod csc;
+pub use csc::*;
+
+mod densesym3x3;
+pub(crate) use densesym3x3::*;
+
+#[cfg(feature = "sdp")]
+mod dense;
+#[cfg(feature = "sdp")]
+pub use dense::*;
//configure tests of internals
#[cfg(test)]
diff --git a/src/algebra/reshaped.rs b/src/algebra/reshaped.rs
new file mode 100644
index 00000000..28a2f29b
--- /dev/null
+++ b/src/algebra/reshaped.rs
@@ -0,0 +1,17 @@
+/// Adjoint of a matrix
+use crate::algebra::{FloatT, MatrixShape, ReshapedMatrix, ShapedMatrix};
+
+impl<'a, T> ShapedMatrix for ReshapedMatrix<'a, T>
+where
+ T: FloatT,
+{
+ fn nrows(&self) -> usize {
+ self.m
+ }
+ fn ncols(&self) -> usize {
+ self.n
+ }
+ fn shape(&self) -> MatrixShape {
+ MatrixShape::N
+ }
+}
diff --git a/src/algebra/scalarmath.rs b/src/algebra/scalarmath.rs
new file mode 100644
index 00000000..55ede452
--- /dev/null
+++ b/src/algebra/scalarmath.rs
@@ -0,0 +1,33 @@
+use super::{FloatT, ScalarMath};
+
+impl<T: FloatT> ScalarMath for T {
+ type T = T;
+ fn clip(&self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> T {
+ if *self < min_thresh {
+ min_new
+ } else if *self > max_thresh {
+ max_new
+ } else {
+ *self
+ }
+ }
+
+ fn logsafe(&self) -> T {
+ if *self <= T::zero() {
+ -T::infinity()
+ } else {
+ self.ln()
+ }
+ }
+}
+
+pub(crate) fn triangular_number(k: usize) -> usize {
+ (k * (k + 1)) >> 1
+}
+
+#[cfg(feature = "sdp")]
+pub(crate) fn triangular_index(k: usize) -> usize {
+ // 0-based index into a packed triangle. Same as:
+ // triangular number(k+1) - 1 = (((k+1) * (k+2)) >> 1) - 1
+ (k * (k + 3)) >> 1
+}
diff --git a/src/algebra/symmetric.rs b/src/algebra/symmetric.rs
new file mode 100644
index 00000000..fd0c6fab
--- /dev/null
+++ b/src/algebra/symmetric.rs
@@ -0,0 +1,17 @@
+/// Adjoint of a matrix
+use crate::algebra::{MatrixShape, ShapedMatrix, Symmetric};
+
+impl<'a, M> ShapedMatrix for Symmetric<'a, M>
+where
+ M: ShapedMatrix,
+{
+ fn nrows(&self) -> usize {
+ self.src.nrows()
+ }
+ fn ncols(&self) -> usize {
+ self.src.ncols()
+ }
+ fn shape(&self) -> MatrixShape {
+ MatrixShape::N
+ }
+}
diff --git a/src/algebra/tests/matrix.rs b/src/algebra/tests/matrix.rs
index c76e15f9..8cfb8010 100644
--- a/src/algebra/tests/matrix.rs
+++ b/src/algebra/tests/matrix.rs
@@ -215,13 +215,13 @@ fn test_gemv() {
let a = 2.;
let b = -3.;
- A.gemv(&mut y, MatrixShape::N, &x, a, b);
+ A.gemv(&mut y, &x, a, b);
assert_eq!(y, vec![7., 66., 35.]);
let mut y = vec![1., -2., 3., -4.];
let x = vec![5., -6., 7.];
- A.gemv(&mut y, MatrixShape::T, &x, a, b);
+ A.t().gemv(&mut y, &x, a, b);
assert_eq!(y, vec![-49., -220., -33., 42.]);
}
@@ -233,7 +233,8 @@ fn test_symv() {
let a = -2.;
let b = 3.;
- A.symv(&mut y, MatrixTriangle::Triu, &x, a, b);
+ let Asym = A.sym();
+ Asym.symv(&mut y, &x, a, b);
assert_eq!(y, vec![46.0, -29.0, -25.0, -4.0]);
}
diff --git a/src/algebra/tests/vector.rs b/src/algebra/tests/vector.rs
index 5e49d1f4..d083ce11 100644
--- a/src/algebra/tests/vector.rs
+++ b/src/algebra/tests/vector.rs
@@ -1,11 +1,5 @@
use crate::algebra::*;
-fn inf_norm_diff<T: FloatT>(a: &[T], b: &[T]) -> T {
- a.iter()
- .zip(b)
- .fold(T::zero(), |acc, (x, y)| T::max(acc, T::abs(*x - *y)))
-}
-
#[test]
fn test_copy_from() {
let x = vec![3., 0., 2., 1.];
@@ -55,7 +49,7 @@ fn test_scale() {
fn test_recip() {
let mut x = vec![3., 10., 2., 1.];
x.recip();
- assert!(inf_norm_diff(&x, &[1. / 3., 1. / 10., 1. / 2., 1.]) < 1e-8);
+ assert!(x.norm_inf_diff(&[1. / 3., 1. / 10., 1. / 2., 1.]) < 1e-8);
}
#[test]
diff --git a/src/algebra/vecmath.rs b/src/algebra/vecmath.rs
new file mode 100644
index 00000000..4b87b561
--- /dev/null
+++ b/src/algebra/vecmath.rs
@@ -0,0 +1,181 @@
+use super::{FloatT, ScalarMath, VectorMath};
+use std::iter::zip;
+
+impl<T: FloatT> VectorMath for [T] {
+ type T = T;
+ fn copy_from(&mut self, src: &[T]) -> &mut Self {
+ self.copy_from_slice(src);
+ self
+ }
+
+ fn select(&self, index: &[bool]) -> Vec<T> {
+ assert_eq!(self.len(), index.len());
+ zip(self, index)
+ .filter(|(_x, &b)| b)
+ .map(|(&x, _b)| x)
+ .collect()
+ }
+
+ fn scalarop(&mut self, op: impl Fn(T) -> T) -> &mut Self {
+ for x in &mut *self {
+ *x = op(*x);
+ }
+ self
+ }
+
+ fn scalarop_from(&mut self, op: impl Fn(T) -> T, v: &[T]) -> &mut Self {
+ for (x, v) in zip(&mut *self, v) {
+ *x = op(*v);
+ }
+ self
+ }
+
+ fn translate(&mut self, c: T) -> &mut Self {
+ //NB: translate is a scalar shift of all variables and is
+ //used only in the NN cone to force vectors into R^n_+
+ self.scalarop(|x| x + c)
+ }
+
+ fn set(&mut self, c: T) -> &mut Self {
+ self.scalarop(|_x| c)
+ }
+
+ fn scale(&mut self, c: T) -> &mut Self {
+ self.scalarop(|x| x * c)
+ }
+
+ fn recip(&mut self) -> &mut Self {
+ self.scalarop(T::recip)
+ }
+
+ fn sqrt(&mut self) -> &mut Self {
+ self.scalarop(T::sqrt)
+ }
+
+ fn rsqrt(&mut self) -> &mut Self {
+ self.scalarop(|x| T::recip(T::sqrt(x)))
+ }
+
+ fn negate(&mut self) -> &mut Self {
+ self.scalarop(|x| -x)
+ }
+
+ fn hadamard(&mut self, y: &[T]) -> &mut Self {
+ zip(&mut *self, y).for_each(|(x, y)| *x *= *y);
+ self
+ }
+
+ fn clip(&mut self, min_thresh: T, max_thresh: T, min_new: T, max_new: T) -> &mut Self {
+ self.scalarop(|x| x.clip(min_thresh, max_thresh, min_new, max_new))
+ }
+
+ fn normalize(&mut self) -> T {
+ let norm = self.norm();
+ if norm == T::zero() {
+ return T::zero();
+ }
+ self.scale(norm.recip());
+ norm
+ }
+
+ fn dot(&self, y: &[T]) -> T {
+ zip(self, y).fold(T::zero(), |acc, (&x, &y)| acc + x * y)
+ }
+
+ fn dot_shifted(z: &[T], s: &[T], dz: &[T], ds: &[T], α: T) -> T {
+ assert_eq!(z.len(), s.len());
+ assert_eq!(z.len(), dz.len());
+ assert_eq!(s.len(), ds.len());
+
+ let mut out = T::zero();
+ for ((&s, &ds), (&z, &dz)) in zip(zip(s, ds), zip(z, dz)) {
+ let si = s + α * ds;
+ let zi = z + α * dz;
+ out += si * zi;
+ }
+ out
+ }
+
+ fn dist(&self, y: &Self) -> T {
+ let dist2 = zip(self, y).fold(T::zero(), |acc, (&x, &y)| acc + T::powi(x - y, 2));
+ T::sqrt(dist2)
+ }
+
+ fn sumsq(&self) -> T {
+ self.dot(self)
+ }
+
+ // 2-norm
+ fn norm(&self) -> T {
+ T::sqrt(self.sumsq())
+ }
+
+ //scaled norm of elementwise produce self.*v
+ fn norm_scaled(&self, v: &[T]) -> T {
+ assert_eq!(self.len(), v.len());
+ let total = zip(self, v).fold(T::zero(), |acc, (&x, &y)| {
+ let prod = x * y;
+ acc + prod * prod
+ });
+ T::sqrt(total)
+ }
+
+ // Returns infinity norm, ignoring NaNs
+ fn norm_inf(&self) -> T {
+ let mut out = T::zero();
+ for v in self.iter().map(|v| v.abs()) {
+ out = if v > out { v } else { out };
+ }
+ out
+ }
+
+ // Returns one norm
+ fn norm_one(&self) -> T {
+ self.iter().fold(T::zero(), |acc, v| acc + v.abs())
+ }
+
+ // max absolute difference (used for unit testing)
+ fn norm_inf_diff(&self, b: &[T]) -> T {
+ zip(self, b).fold(T::zero(), |acc, (x, y)| T::max(acc, T::abs(*x - *y)))
+ }
+
+ fn minimum(&self) -> T {
+ self.iter().fold(T::infinity(), |r, &s| T::min(r, s))
+ }
+
+ fn maximum(&self) -> T {
+ self.iter().fold(-T::infinity(), |r, &s| T::max(r, s))
+ }
+
+ fn mean(&self) -> T {
+ let mean = if self.is_empty() {
+ T::zero()
+ } else {
+ let num = self.iter().fold(T::zero(), |r, &s| r + s);
+ let den = T::from_usize(self.len()).unwrap();
+ num / den
+ };
+ mean
+ }
+
+ fn is_finite(&self) -> bool {
+ self.iter().all(|&x| T::is_finite(x))
+ }
+
+ fn axpby(&mut self, a: T, x: &[T], b: T) -> &mut Self {
+ assert_eq!(self.len(), x.len());
+
+ zip(&mut *self, x).for_each(|(y, x)| *y = a * (*x) + b * (*y));
+ self
+ }
+
+ fn waxpby(&mut self, a: T, x: &[T], b: T, y: &[T]) -> &mut Self {
+ assert_eq!(self.len(), x.len());
+ assert_eq!(self.len(), y.len());
+
+ for (w, (x, y)) in zip(&mut *self, zip(x, y)) {
+ *w = a * (*x) + b * (*y);
+ }
+ self
+ }
+}
diff --git a/src/julia/ClarabelRs/Project.toml b/src/julia/ClarabelRs/Project.toml
index 3c8c6c74..15f17ee1 100644
--- a/src/julia/ClarabelRs/Project.toml
+++ b/src/julia/ClarabelRs/Project.toml
@@ -1,7 +1,7 @@
name = "ClarabelRs"
uuid = "a0c58a0a-712c-48b7-9fd4-64369ecb2011"
authors = ["Paul Goulart "]
-version = "0.4.1"
+version = "0.5.0"
[deps]
Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e"
@@ -11,4 +11,4 @@ Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[compat]
-Clarabel = "0.4.1"
+Clarabel = "0.5.0"
diff --git a/src/julia/ClarabelRs/src/interface.jl b/src/julia/ClarabelRs/src/interface.jl
index 5390efa5..23d53666 100644
--- a/src/julia/ClarabelRs/src/interface.jl
+++ b/src/julia/ClarabelRs/src/interface.jl
@@ -61,7 +61,7 @@ function solver_new_jlrs(P,q,A,b,cones,settings)
serialize(settings), #serialized settings
)
- return Solver(ptr)
+ return Solver{Float64}(ptr)
end
@@ -130,6 +130,9 @@ function ccall_cones_to_arrays(cones::Vector{Clarabel.SupportedCone})
cone_enums[i] = UInt8(PowerConeT::ConeEnumJLRS)
cone_floats[i] = cone.α
+ elseif isa(cone, Clarabel.PSDTriangleConeT)
+ cone_enums[i] = UInt8(PSDTriangleConeT::ConeEnumJLRS)
+ cone_ints[i] = cone.dim;
else
error("Cone type ", typeof(cone), " is not supported through this interface.");
end
diff --git a/src/julia/ClarabelRs/src/types.jl b/src/julia/ClarabelRs/src/types.jl
index 63bb3cf1..ca31713e 100644
--- a/src/julia/ClarabelRs/src/types.jl
+++ b/src/julia/ClarabelRs/src/types.jl
@@ -76,13 +76,14 @@ end
SecondOrderConeT = 2
ExponentialConeT = 3
PowerConeT = 4
+ PSDTriangleConeT = 5
end
-mutable struct Solver <: Clarabel.AbstractSolver{Float64}
+mutable struct Solver{T <: Float64} <: Clarabel.AbstractSolver{Float64}
ptr:: Ptr{Cvoid}
- function Solver(ptr)
+ function Solver{T}(ptr) where T
obj = new(ptr)
finalizer(solver_drop_jlrs,obj)
return obj
diff --git a/src/julia/ClarabelRs/test/MOI_wrapper_tests.jl b/src/julia/ClarabelRs/test/MOI_wrapper_tests.jl
index a54fa32e..07da7eca 100644
--- a/src/julia/ClarabelRs/test/MOI_wrapper_tests.jl
+++ b/src/julia/ClarabelRs/test/MOI_wrapper_tests.jl
@@ -70,7 +70,7 @@ function test_MOI_standard()
BRIDGED,
MYCONFIG,
# use `include` to single out a problem class
- #include = String["test_conic_"],
+ #include = String["test_basic_VectorQuadraticFunction_RelativeEntropyCone"],
exclude = String[
#these two tests fail intermittently depending on platform
#and MOI version. They both converge to reasonable accuracy.
@@ -95,7 +95,7 @@ function test_SolverName()
return
end
-# Settings don't work like this in the compiled versionss
+# Settings don't work like this in the compiled versions
# function test_passing_settings()
# optimizer = ClarabelRs.Optimizer{T}(; verbose=false)
# @test optimizer.solver_settings.verbose === false
diff --git a/src/julia/interface.rs b/src/julia/interface.rs
index 35c3c6ea..04a152cf 100644
--- a/src/julia/interface.rs
+++ b/src/julia/interface.rs
@@ -47,6 +47,7 @@ fn ccall_arrays_to_cones(
Some(ConeEnumJLRS::SecondOrderConeT) => SecondOrderConeT(cones_ints[i] as usize),
Some(ConeEnumJLRS::ExponentialConeT) => ExponentialConeT(),
Some(ConeEnumJLRS::PowerConeT) => PowerConeT(_cones_floats[i]),
+ Some(ConeEnumJLRS::PSDTriangleConeT) => PSDTriangleConeT(cones_ints[i] as usize),
None => panic!("Received unrecognized cone type"),
};
cones.push(cone)
diff --git a/src/julia/types.rs b/src/julia/types.rs
index 2624925a..111faee3 100644
--- a/src/julia/types.rs
+++ b/src/julia/types.rs
@@ -124,4 +124,5 @@ pub(crate) enum ConeEnumJLRS {
SecondOrderConeT = 2,
ExponentialConeT = 3,
PowerConeT = 4,
+ PSDTriangleConeT = 5,
}
diff --git a/src/python/cones_py.rs b/src/python/cones_py.rs
index 845fbd2b..5b340a6e 100644
--- a/src/python/cones_py.rs
+++ b/src/python/cones_py.rs
@@ -106,6 +106,22 @@ impl PyPowerConeT {
}
}
+#[pyclass(name = "PSDTriangleConeT")]
+pub struct PyPSDTriangleConeT {
+ #[pyo3(get)]
+ pub dim: usize,
+}
+#[pymethods]
+impl PyPSDTriangleConeT {
+ #[new]
+ pub fn new(dim: usize) -> Self {
+ Self { dim }
+ }
+ pub fn __repr__(&self) -> String {
+ __repr__cone("PSDTriangleConeT", self.dim)
+ }
+}
+
// We can't implement the foreign trait FromPyObject directly on
// SupportedCone since both are defined outside the crate, so
// put a dummy wrapper around it here.
@@ -142,6 +158,10 @@ impl<'a> FromPyObject<'a> for PySupportedCone {
let α: f64 = obj.getattr("α")?.extract()?;
Ok(PySupportedCone(PowerConeT(α)))
}
+ "PSDTriangleConeT" => {
+ let dim: usize = obj.getattr("dim")?.extract()?;
+ Ok(PySupportedCone(PSDTriangleConeT(dim)))
+ }
_ => {
let mut errmsg = String::new();
write!(errmsg, "Unrecognized cone type : {}", thetype).unwrap();
diff --git a/src/python/mod.rs b/src/python/mod.rs
index 531aab2e..be8316c4 100644
--- a/src/python/mod.rs
+++ b/src/python/mod.rs
@@ -12,6 +12,7 @@ mod cones_py;
mod cscmatrix_py;
mod impl_default_py;
mod module_py;
+pub(crate) mod pyblas;
// NB : Nothing is actually public here, but the python module itself
// is made public so that we can force the docstring above to appear
diff --git a/src/python/module_py.rs b/src/python/module_py.rs
index f3607777..0208a831 100644
--- a/src/python/module_py.rs
+++ b/src/python/module_py.rs
@@ -1,6 +1,12 @@
use super::*;
use pyo3::prelude::*;
+#[pyfunction(name = "force_load_blas_lapack")]
+fn force_load_blas_lapack_py() {
+ //force BLAS/LAPACK fcn pointer load
+ crate::python::pyblas::force_load();
+}
+
// get/set for the solver's internal infinity limit
#[pyfunction(name = "get_infinity")]
fn get_infinity_py() -> f64 {
@@ -14,6 +20,7 @@ fn set_infinity_py(v: f64) {
fn default_infinity_py() {
crate::solver::default_infinity();
}
+
// Python module and registry, which includes registration of the
// data types defined in the other files in this rust module
#[pymodule]
@@ -21,6 +28,10 @@ fn clarabel(_py: Python, m: &PyModule) -> PyResult<()> {
//module version
m.add("__version__", env!("CARGO_PKG_VERSION"))?;
+ // module initializer, called on module import
+ m.add_function(wrap_pyfunction!(force_load_blas_lapack_py, m)?)
+ .unwrap();
+
//module globs
m.add_function(wrap_pyfunction!(get_infinity_py, m)?)
.unwrap();
@@ -35,6 +46,7 @@ fn clarabel(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::()?;
m.add_class::()?;
m.add_class::()?;
+ m.add_class::<PyPSDTriangleConeT>()?;
//other API data types
m.add_class::()?;
diff --git a/src/python/pyblas/blas_loader.rs b/src/python/pyblas/blas_loader.rs
new file mode 100644
index 00000000..f03d0faa
--- /dev/null
+++ b/src/python/pyblas/blas_loader.rs
@@ -0,0 +1,25 @@
+use super::{blas_types::*, *};
+
+impl PyBlasPointers {
+ pub(crate) fn new(py: Python) -> PyResult<Self> {
+ let api = get_pyx_capi(py, "scipy.linalg.cython_blas")?;
+
+ unsafe {
+ let ptrs = PyBlasPointers {
+ ddot_: get_ptr!(api, "ddot"),
+ sdot_: get_ptr!(api, "sdot"),
+ dgemm_: get_ptr!(api, "dgemm"),
+ sgemm_: get_ptr!(api, "sgemm"),
+ dgemv_: get_ptr!(api, "dgemv"),
+ sgemv_: get_ptr!(api, "sgemv"),
+ dsymv_: get_ptr!(api, "dsymv"),
+ ssymv_: get_ptr!(api, "ssymv"),
+ dsyrk_: get_ptr!(api, "dsyrk"),
+ ssyrk_: get_ptr!(api, "ssyrk"),
+ dsyr2k_: get_ptr!(api, "dsyr2k"),
+ ssyr2k_: get_ptr!(api, "ssyr2k"),
+ };
+ Ok(ptrs)
+ }
+ }
+}
diff --git a/src/python/pyblas/blas_types.rs b/src/python/pyblas/blas_types.rs
new file mode 100644
index 00000000..45c5e7fa
--- /dev/null
+++ b/src/python/pyblas/blas_types.rs
@@ -0,0 +1,175 @@
+#![allow(clippy::upper_case_acronyms)]
+use libc::{c_char, c_double, c_float, c_int};
+
+pub struct PyBlasPointers {
+ pub ddot_: DDOT,
+ pub sdot_: SDOT,
+ pub dgemm_: DGEMM,
+ pub sgemm_: SGEMM,
+ pub dgemv_: DGEMV,
+ pub sgemv_: SGEMV,
+ pub dsymv_: DSYMV,
+ pub ssymv_: SSYMV,
+ pub dsyrk_: DSYRK,
+ pub ssyrk_: SSYRK,
+ pub dsyr2k_: DSYR2K,
+ pub ssyr2k_: SSYR2K,
+}
+
+type DDOT = extern "C" fn(
+ n: *const c_int,
+ x: *const c_double,
+ incx: *const c_int,
+ y: *const c_double,
+ incy: *const c_int,
+) -> c_double;
+
+type SDOT = extern "C" fn(
+ n: *const c_int,
+ x: *const c_float,
+ incx: *const c_int,
+ y: *const c_float,
+ incy: *const c_int,
+) -> c_float;
+
+type DGEMM = extern "C" fn(
+ transa: *const c_char,
+ transb: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_double,
+ a: *const c_double,
+ lda: *const c_int,
+ b: *const c_double,
+ ldb: *const c_int,
+ beta: *const c_double,
+ c: *mut c_double,
+ ldc: *const c_int,
+);
+
+type SGEMM = extern "C" fn(
+ transa: *const c_char,
+ transb: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_float,
+ a: *const c_float,
+ lda: *const c_int,
+ b: *const c_float,
+ ldb: *const c_int,
+ beta: *const c_float,
+ c: *mut c_float,
+ ldc: *const c_int,
+);
+
+type DGEMV = extern "C" fn(
+ trans: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ alpha: *const c_double,
+ a: *const c_double,
+ lda: *const c_int,
+ x: *const c_double,
+ incx: *const c_int,
+ beta: *const c_double,
+ y: *mut c_double,
+ incy: *const c_int,
+);
+
+type SGEMV = extern "C" fn(
+ trans: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ alpha: *const c_float,
+ a: *const c_float,
+ lda: *const c_int,
+ x: *const c_float,
+ incx: *const c_int,
+ beta: *const c_float,
+ y: *mut c_float,
+ incy: *const c_int,
+);
+
+type DSYMV = extern "C" fn(
+ uplo: *const c_char,
+ n: *const c_int,
+ alpha: *const c_double,
+ a: *const c_double,
+ lda: *const c_int,
+ x: *const c_double,
+ incx: *const c_int,
+ beta: *const c_double,
+ y: *mut c_double,
+ incy: *const c_int,
+);
+
+type SSYMV = extern "C" fn(
+ uplo: *const c_char,
+ n: *const c_int,
+ alpha: *const c_float,
+ a: *const c_float,
+ lda: *const c_int,
+ x: *const c_float,
+ incx: *const c_int,
+ beta: *const c_float,
+ y: *mut c_float,
+ incy: *const c_int,
+);
+
+type DSYRK = extern "C" fn(
+ uplo: *const c_char,
+ trans: *const c_char,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_double,
+ a: *const c_double,
+ lda: *const c_int,
+ beta: *const c_double,
+ c: *mut c_double,
+ ldc: *const c_int,
+);
+
+type SSYRK = extern "C" fn(
+ uplo: *const c_char,
+ trans: *const c_char,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_float,
+ a: *const c_float,
+ lda: *const c_int,
+ beta: *const c_float,
+ c: *mut c_float,
+ ldc: *const c_int,
+);
+
+type DSYR2K = extern "C" fn(
+ uplo: *const c_char,
+ trans: *const c_char,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_double,
+ a: *const c_double,
+ lda: *const c_int,
+ b: *const c_double,
+ ldb: *const c_int,
+ beta: *const c_double,
+ c: *mut c_double,
+ ldc: *const c_int,
+);
+
+type SSYR2K = extern "C" fn(
+ uplo: *const c_char,
+ trans: *const c_char,
+ n: *const c_int,
+ k: *const c_int,
+ alpha: *const c_float,
+ a: *const c_float,
+ lda: *const c_int,
+ b: *const c_float,
+ ldb: *const c_int,
+ beta: *const c_float,
+ c: *mut c_float,
+ ldc: *const c_int,
+);
diff --git a/src/python/pyblas/blas_wrappers.rs b/src/python/pyblas/blas_wrappers.rs
new file mode 100644
index 00000000..1be6b84f
--- /dev/null
+++ b/src/python/pyblas/blas_wrappers.rs
@@ -0,0 +1,309 @@
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::missing_safety_doc)]
+#![allow(dead_code)]
+
+use super::blas_types::*;
+use lazy_static::lazy_static;
+use libc::c_char;
+
+lazy_static! {
+ static ref PYBLAS: PyBlasPointers = pyo3::Python::with_gil(|py| {
+ PyBlasPointers::new(py).expect("Failed to load SciPy BLAS bindings.")
+ });
+}
+
+pub(crate) fn force_load() {
+ //forces load of the lazy_static. Choice of function is arbitrary.
+ let _ = PYBLAS.ddot_;
+}
+
+pub unsafe fn ddot(n: i32, x: &[f64], incx: i32, y: &[f64], incy: i32) -> f64 {
+ (PYBLAS.ddot_)(&n, x.as_ptr(), &incx, y.as_ptr(), &incy)
+}
+
+pub unsafe fn sdot(n: i32, x: &[f32], incx: i32, y: &[f32], incy: i32) -> f32 {
+ (PYBLAS.sdot_)(&n, x.as_ptr(), &incx, y.as_ptr(), &incy)
+}
+
+pub unsafe fn dgemm(
+ transa: u8,
+ transb: u8,
+ m: i32,
+ n: i32,
+ k: i32,
+ alpha: f64,
+ a: &[f64],
+ lda: i32,
+ b: &[f64],
+ ldb: i32,
+ beta: f64,
+ c: &mut [f64],
+ ldc: i32,
+) {
+ (PYBLAS.dgemm_)(
+ &(transa as c_char),
+ &(transb as c_char),
+ &m,
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ b.as_ptr(),
+ &ldb,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
+pub unsafe fn sgemm(
+ transa: u8,
+ transb: u8,
+ m: i32,
+ n: i32,
+ k: i32,
+ alpha: f32,
+ a: &[f32],
+ lda: i32,
+ b: &[f32],
+ ldb: i32,
+ beta: f32,
+ c: &mut [f32],
+ ldc: i32,
+) {
+ (PYBLAS.sgemm_)(
+ &(transa as c_char),
+ &(transb as c_char),
+ &m,
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ b.as_ptr(),
+ &ldb,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
+
+pub unsafe fn dgemv(
+ trans: u8,
+ m: i32,
+ n: i32,
+ alpha: f64,
+ a: &[f64],
+ lda: i32,
+ x: &[f64],
+ incx: i32,
+ beta: f64,
+ y: &mut [f64],
+ incy: i32,
+) {
+ (PYBLAS.dgemv_)(
+ &(trans as c_char),
+ &m,
+ &n,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ x.as_ptr(),
+ &incx,
+ &beta,
+ y.as_mut_ptr(),
+ &incy,
+ )
+}
+
+pub unsafe fn sgemv(
+ trans: u8,
+ m: i32,
+ n: i32,
+ alpha: f32,
+ a: &[f32],
+ lda: i32,
+ x: &[f32],
+ incx: i32,
+ beta: f32,
+ y: &mut [f32],
+ incy: i32,
+) {
+ (PYBLAS.sgemv_)(
+ &(trans as c_char),
+ &m,
+ &n,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ x.as_ptr(),
+ &incx,
+ &beta,
+ y.as_mut_ptr(),
+ &incy,
+ )
+}
+
+pub unsafe fn dsymv(
+ uplo: u8,
+ n: i32,
+ alpha: f64,
+ a: &[f64],
+ lda: i32,
+ x: &[f64],
+ incx: i32,
+ beta: f64,
+ y: &mut [f64],
+ incy: i32,
+) {
+ (PYBLAS.dsymv_)(
+ &(uplo as c_char),
+ &n,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ x.as_ptr(),
+ &incx,
+ &beta,
+ y.as_mut_ptr(),
+ &incy,
+ )
+}
+
+pub unsafe fn ssymv(
+ uplo: u8,
+ n: i32,
+ alpha: f32,
+ a: &[f32],
+ lda: i32,
+ x: &[f32],
+ incx: i32,
+ beta: f32,
+ y: &mut [f32],
+ incy: i32,
+) {
+ (PYBLAS.ssymv_)(
+ &(uplo as c_char),
+ &n,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ x.as_ptr(),
+ &incx,
+ &beta,
+ y.as_mut_ptr(),
+ &incy,
+ )
+}
+
+pub unsafe fn dsyrk(
+ uplo: u8,
+ trans: u8,
+ n: i32,
+ k: i32,
+ alpha: f64,
+ a: &[f64],
+ lda: i32,
+ beta: f64,
+ c: &mut [f64],
+ ldc: i32,
+) {
+ (PYBLAS.dsyrk_)(
+ &(uplo as c_char),
+ &(trans as c_char),
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
+
+pub unsafe fn ssyrk(
+ uplo: u8,
+ trans: u8,
+ n: i32,
+ k: i32,
+ alpha: f32,
+ a: &[f32],
+ lda: i32,
+ beta: f32,
+ c: &mut [f32],
+ ldc: i32,
+) {
+ (PYBLAS.ssyrk_)(
+ &(uplo as c_char),
+ &(trans as c_char),
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
+
+pub unsafe fn dsyr2k(
+ uplo: u8,
+ trans: u8,
+ n: i32,
+ k: i32,
+ alpha: f64,
+ a: &[f64],
+ lda: i32,
+ b: &[f64],
+ ldb: i32,
+ beta: f64,
+ c: &mut [f64],
+ ldc: i32,
+) {
+ (PYBLAS.dsyr2k_)(
+ &(uplo as c_char),
+ &(trans as c_char),
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ b.as_ptr(),
+ &ldb,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
+
+pub unsafe fn ssyr2k(
+ uplo: u8,
+ trans: u8,
+ n: i32,
+ k: i32,
+ alpha: f32,
+ a: &[f32],
+ lda: i32,
+ b: &[f32],
+ ldb: i32,
+ beta: f32,
+ c: &mut [f32],
+ ldc: i32,
+) {
+ (PYBLAS.ssyr2k_)(
+ &(uplo as c_char),
+ &(trans as c_char),
+ &n,
+ &k,
+ &alpha,
+ a.as_ptr(),
+ &lda,
+ b.as_ptr(),
+ &ldb,
+ &beta,
+ c.as_mut_ptr(),
+ &ldc,
+ )
+}
diff --git a/src/python/pyblas/lapack_loader.rs b/src/python/pyblas/lapack_loader.rs
new file mode 100644
index 00000000..d33a5d5b
--- /dev/null
+++ b/src/python/pyblas/lapack_loader.rs
@@ -0,0 +1,21 @@
+use super::{lapack_types::*, *};
+
+impl PyLapackPointers {
+ pub(crate) fn new(py: Python) -> PyResult<Self> {
+ let api = get_pyx_capi(py, "scipy.linalg.cython_lapack")?;
+
+ unsafe {
+ let ptrs = PyLapackPointers {
+ dsyevr_: get_ptr!(api, "dsyevr"),
+ ssyevr_: get_ptr!(api, "ssyevr"),
+ dpotrf_: get_ptr!(api, "dpotrf"),
+ spotrf_: get_ptr!(api, "spotrf"),
+ dgesdd_: get_ptr!(api, "dgesdd"),
+ sgesdd_: get_ptr!(api, "sgesdd"),
+ dgesvd_: get_ptr!(api, "dgesvd"),
+ sgesvd_: get_ptr!(api, "sgesvd"),
+ };
+ Ok(ptrs)
+ }
+ }
+}
diff --git a/src/python/pyblas/lapack_types.rs b/src/python/pyblas/lapack_types.rs
new file mode 100644
index 00000000..0e6ca5fb
--- /dev/null
+++ b/src/python/pyblas/lapack_types.rs
@@ -0,0 +1,145 @@
+#![allow(clippy::upper_case_acronyms)]
+use libc::{c_char, c_int};
+
+pub struct PyLapackPointers {
+ pub dsyevr_: DSYEVR,
+ pub ssyevr_: SSYEVR,
+ pub dpotrf_: DPOTRF,
+ pub spotrf_: SPOTRF,
+ pub dgesdd_: DGESDD,
+ pub sgesdd_: SGESDD,
+ pub dgesvd_: DGESVD,
+ pub sgesvd_: SGESVD,
+}
+
+type DSYEVR = extern "C" fn(
+ jobz: *const c_char,
+ range: *const c_char,
+ uplo: *const c_char,
+ n: *const c_int,
+ A: *mut f64,
+ lda: *const c_int,
+ vl: *const f64,
+ vu: *const f64,
+ il: *const c_int,
+ iu: *const c_int,
+ abstol: *const f64,
+ m: *mut c_int,
+ W: *mut f64,
+ Z: *mut f64,
+ ldz: *const c_int,
+ ISUPPZ: *mut c_int,
+ work: *mut f64,
+ lwork: *const c_int,
+ iwork: *mut c_int,
+ liwork: *const c_int,
+ info: *mut c_int,
+);
+
+type SSYEVR = extern "C" fn(
+ jobz: *const c_char,
+ range: *const c_char,
+ uplo: *const c_char,
+ n: *const c_int,
+ A: *mut f32,
+ lda: *const c_int,
+ vl: *const f32,
+ vu: *const f32,
+ il: *const c_int,
+ iu: *const c_int,
+ abstol: *const f32,
+ m: *mut c_int,
+ W: *mut f32,
+ Z: *mut f32,
+ ldz: *const c_int,
+ ISUPPZ: *mut c_int,
+ work: *mut f32,
+ lwork: *const c_int,
+ iwork: *mut c_int,
+ liwork: *const c_int,
+ info: *mut c_int,
+);
+
+type DPOTRF = extern "C" fn(
+ uplo: *const c_char,
+ n: *const c_int,
+ A: *mut f64,
+ lda: *const c_int,
+ info: *mut c_int,
+);
+
+type SPOTRF = extern "C" fn(
+ uplo: *const c_char,
+ n: *const c_int,
+ A: *mut f32,
+ lda: *const c_int,
+ info: *mut c_int,
+);
+
+type DGESDD = extern "C" fn(
+ jobz: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ A: *mut f64,
+ lda: *const c_int,
+ S: *mut f64,
+ U: *mut f64,
+ ldu: *const c_int,
+ VT: *mut f64,
+ ldvt: *const c_int,
+ work: *mut f64,
+ lwork: *const c_int,
+ iwork: *mut c_int,
+ info: *mut c_int,
+);
+
+type SGESDD = extern "C" fn(
+ jobz: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ A: *mut f32,
+ lda: *const c_int,
+ S: *mut f32,
+ U: *mut f32,
+ ldu: *const c_int,
+ VT: *mut f32,
+ ldvt: *const c_int,
+ work: *mut f32,
+ lwork: *const c_int,
+ iwork: *mut c_int,
+ info: *mut c_int,
+);
+
+type DGESVD = extern "C" fn(
+ jobu: *const c_char,
+ jobvt: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ A: *mut f64,
+ lda: *const c_int,
+ S: *mut f64,
+ U: *mut f64,
+ ldu: *const c_int,
+ VT: *mut f64,
+ ldvt: *const c_int,
+ work: *mut f64,
+ lwork: *const c_int,
+ info: *mut c_int,
+);
+
+type SGESVD = extern "C" fn(
+ jobu: *const c_char,
+ jobvt: *const c_char,
+ m: *const c_int,
+ n: *const c_int,
+ A: *mut f32,
+ lda: *const c_int,
+ S: *mut f32,
+ U: *mut f32,
+ ldu: *const c_int,
+ VT: *mut f32,
+ ldvt: *const c_int,
+ work: *mut f32,
+ lwork: *const c_int,
+ info: *mut c_int,
+);
diff --git a/src/python/pyblas/lapack_wrappers.rs b/src/python/pyblas/lapack_wrappers.rs
new file mode 100644
index 00000000..8d088507
--- /dev/null
+++ b/src/python/pyblas/lapack_wrappers.rs
@@ -0,0 +1,257 @@
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::missing_safety_doc)]
+
+use super::lapack_types::*;
+use lazy_static::lazy_static;
+use libc::c_char;
+
+lazy_static! {
+ static ref PYLAPACK: PyLapackPointers = pyo3::Python::with_gil(|py| {
+ PyLapackPointers::new(py).expect("Failed to load SciPy LAPACK bindings.")
+ });
+}
+
+pub(crate) fn force_load() {
+ //forces load of the lazy_static. Choice of function is arbitrary.
+ let _ = PYLAPACK.dsyevr_;
+}
+
+pub unsafe fn dsyevr(
+ jobz: u8,
+ range: u8,
+ uplo: u8,
+ n: i32,
+ a: &mut [f64],
+ lda: i32,
+ vl: f64,
+ vu: f64,
+ il: i32,
+ iu: i32,
+ abstol: f64,
+ m: &mut i32,
+ w: &mut [f64],
+ z: &mut [f64],
+ ldz: i32,
+ isuppz: &mut [i32],
+ work: &mut [f64],
+ lwork: i32,
+ iwork: &mut [i32],
+ liwork: i32,
+ info: &mut i32,
+) {
+ (PYLAPACK.dsyevr_)(
+ &(jobz as c_char),
+ &(range as c_char),
+ &(uplo as c_char),
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ &vl,
+ &vu,
+ &il,
+ &iu,
+ &abstol,
+ m,
+ w.as_mut_ptr(),
+ z.as_mut_ptr(),
+ &ldz,
+ isuppz.as_mut_ptr(),
+ work.as_mut_ptr(),
+ &lwork,
+ iwork.as_mut_ptr(),
+ &liwork,
+ info,
+ )
+}
+
+pub unsafe fn ssyevr(
+ jobz: u8,
+ range: u8,
+ uplo: u8,
+ n: i32,
+ a: &mut [f32],
+ lda: i32,
+ vl: f32,
+ vu: f32,
+ il: i32,
+ iu: i32,
+ abstol: f32,
+ m: &mut i32,
+ w: &mut [f32],
+ z: &mut [f32],
+ ldz: i32,
+ isuppz: &mut [i32],
+ work: &mut [f32],
+ lwork: i32,
+ iwork: &mut [i32],
+ liwork: i32,
+ info: &mut i32,
+) {
+ (PYLAPACK.ssyevr_)(
+ &(jobz as c_char),
+ &(range as c_char),
+ &(uplo as c_char),
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ &vl,
+ &vu,
+ &il,
+ &iu,
+ &abstol,
+ m,
+ w.as_mut_ptr(),
+ z.as_mut_ptr(),
+ &ldz,
+ isuppz.as_mut_ptr(),
+ work.as_mut_ptr(),
+ &lwork,
+ iwork.as_mut_ptr(),
+ &liwork,
+ info,
+ )
+}
+
+pub unsafe fn dpotrf(uplo: u8, n: i32, a: &mut [f64], lda: i32, info: &mut i32) {
+ (PYLAPACK.dpotrf_)(&(uplo as c_char), &n, a.as_mut_ptr(), &lda, info)
+}
+
+pub unsafe fn spotrf(uplo: u8, n: i32, a: &mut [f32], lda: i32, info: &mut i32) {
+ (PYLAPACK.spotrf_)(&(uplo as c_char), &n, a.as_mut_ptr(), &lda, info)
+}
+
+pub unsafe fn dgesdd(
+ jobz: u8,
+ m: i32,
+ n: i32,
+ a: &mut [f64],
+ lda: i32,
+ s: &mut [f64],
+ u: &mut [f64],
+ ldu: i32,
+ vt: &mut [f64],
+ ldvt: i32,
+ work: &mut [f64],
+ lwork: i32,
+ iwork: &mut [i32],
+ info: &mut i32,
+) {
+ (PYLAPACK.dgesdd_)(
+ &(jobz as c_char),
+ &m,
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ s.as_mut_ptr(),
+ u.as_mut_ptr(),
+ &ldu,
+ vt.as_mut_ptr(),
+ &ldvt,
+ work.as_mut_ptr(),
+ &lwork,
+ iwork.as_mut_ptr(),
+ info,
+ )
+}
+
+pub unsafe fn sgesdd(
+ jobz: u8,
+ m: i32,
+ n: i32,
+ a: &mut [f32],
+ lda: i32,
+ s: &mut [f32],
+ u: &mut [f32],
+ ldu: i32,
+ vt: &mut [f32],
+ ldvt: i32,
+ work: &mut [f32],
+ lwork: i32,
+ iwork: &mut [i32],
+ info: &mut i32,
+) {
+ (PYLAPACK.sgesdd_)(
+ &(jobz as c_char),
+ &m,
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ s.as_mut_ptr(),
+ u.as_mut_ptr(),
+ &ldu,
+ vt.as_mut_ptr(),
+ &ldvt,
+ work.as_mut_ptr(),
+ &lwork,
+ iwork.as_mut_ptr(),
+ info,
+ )
+}
+
+pub unsafe fn dgesvd(
+ jobu: u8,
+ jobvt: u8,
+ m: i32,
+ n: i32,
+ a: &mut [f64],
+ lda: i32,
+ s: &mut [f64],
+ u: &mut [f64],
+ ldu: i32,
+ vt: &mut [f64],
+ ldvt: i32,
+ work: &mut [f64],
+ lwork: i32,
+ info: &mut i32,
+) {
+ (PYLAPACK.dgesvd_)(
+ &(jobu as c_char),
+ &(jobvt as c_char),
+ &m,
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ s.as_mut_ptr(),
+ u.as_mut_ptr(),
+ &ldu,
+ vt.as_mut_ptr(),
+ &ldvt,
+ work.as_mut_ptr(),
+ &lwork,
+ info,
+ )
+}
+
+pub unsafe fn sgesvd(
+ jobu: u8,
+ jobvt: u8,
+ m: i32,
+ n: i32,
+ a: &mut [f32],
+ lda: i32,
+ s: &mut [f32],
+ u: &mut [f32],
+ ldu: i32,
+ vt: &mut [f32],
+ ldvt: i32,
+ work: &mut [f32],
+ lwork: i32,
+ info: &mut i32,
+) {
+ (PYLAPACK.sgesvd_)(
+ &(jobu as c_char),
+ &(jobvt as c_char),
+ &m,
+ &n,
+ a.as_mut_ptr(),
+ &lda,
+ s.as_mut_ptr(),
+ u.as_mut_ptr(),
+ &ldu,
+ vt.as_mut_ptr(),
+ &ldvt,
+ work.as_mut_ptr(),
+ &lwork,
+ info,
+ )
+}
diff --git a/src/python/pyblas/mod.rs b/src/python/pyblas/mod.rs
new file mode 100644
index 00000000..5d591598
--- /dev/null
+++ b/src/python/pyblas/mod.rs
@@ -0,0 +1,43 @@
+use pyo3::prelude::*;
+use pyo3::types::PyCapsule;
+
+// public blas interface
+mod blas_wrappers;
+pub use blas_wrappers::*;
+
+// public lapack interface
+mod lapack_wrappers;
+pub use lapack_wrappers::*;
+
+mod blas_loader;
+mod blas_types;
+mod lapack_loader;
+mod lapack_types;
+
+// a function to force instantiation of the blas/lapack wrappers
+// stored in lazy_statics. This function can be called during
+// initialization of the python module to ensure that lazy_statics
+// are already realised before making an FFI call to blas/lapack.
+pub fn force_load() {
+ let _ = blas_wrappers::force_load();
+ let _ = lapack_wrappers::force_load();
+}
+
+// utilities for scipy blas/lapack import
+macro_rules! get_ptr {
+ ($api: ident, $str: tt) => {
+ std::mem::transmute(get_capsule_void_ptr($api, $str)?)
+ };
+}
+pub(crate) use get_ptr;
+
+fn get_capsule_void_ptr(pyx_capi: &PyAny, name: &str) -> PyResult<*mut libc::c_void> {
+ let capsule: &PyCapsule = pyx_capi.get_item(name)?.downcast()?;
+ Ok(capsule.pointer())
+}
+
+fn get_pyx_capi<'a>(py: Python<'a>, pymodule: &str) -> PyResult<&'a PyAny> {
+ let lib = PyModule::import(py, pymodule)?;
+ let api = lib.getattr("__pyx_capi__")?;
+ Ok(api)
+}
diff --git a/src/qdldl/qdldl.rs b/src/qdldl/qdldl.rs
index b9923697..763846e4 100644
--- a/src/qdldl/qdldl.rs
+++ b/src/qdldl/qdldl.rs
@@ -2,8 +2,9 @@
use crate::algebra::*;
use core::cmp::{max, min};
use derive_builder::Builder;
+use std::iter::zip;
-/// Required settings for [QDLDLFactorisation](QDLDLFactorisation)
+/// Required settings for [`QDLDLFactorisation`](QDLDLFactorisation)
#[derive(Builder, Debug)]
pub struct QDLDLSettings {
@@ -119,7 +120,7 @@ where
let nzval = &mut self.workspace.triuA.nzval; // post perm internal data
let AtoPAPt = &self.workspace.AtoPAPt; //mapping from input matrix entries to triuA
- for (&idx, &sign) in indices.iter().zip(signs.iter()) {
+ for (&idx, &sign) in zip(indices, signs) {
let sign: T = T::from_i8(sign).unwrap();
nzval[AtoPAPt[idx]] += offset * sign;
}
@@ -360,11 +361,7 @@ fn _etree(
etree.fill(QDLDL_UNKNOWN);
//Abort if A doesn't have at least one entry in every column
- if Ap
- .iter()
- .zip(Ap.iter().skip(1))
- .any(|(¤t, &next)| current == next)
- {
+ if !Ap.windows(2).all(|c| c[0] < c[1]) {
return Err(-1);
}
@@ -429,7 +426,7 @@ fn _factor_inner(
//set Lp to cumsum(Lnz), starting from zero
Lp[0] = 0;
let mut acc = 0;
- for (Lp, Lnz) in Lp[1..].iter_mut().zip(Lnz) {
+ for (Lp, Lnz) in zip(&mut Lp[1..], Lnz) {
*Lp = acc + Lnz;
acc = *Lp;
}
@@ -603,7 +600,7 @@ fn _lsolve_safe(Lp: &[usize], Li: &[usize], Lx: &[T], x: &mut [T]) {
let (f, l) = (Lp[i], Lp[i + 1]);
let Lx = &Lx[f..l];
let Li = &Li[f..l];
- for (&Lij, &Lxj) in Li.iter().zip(Lx.iter()) {
+ for (&Lij, &Lxj) in zip(Li, Lx) {
x[Lij] -= Lxj * xi;
}
}
@@ -616,7 +613,7 @@ fn _ltsolve_safe(Lp: &[usize], Li: &[usize], Lx: &[T], x: &mut [T]) {
let (f, l) = (Lp[i], Lp[i + 1]);
let Lx = &Lx[f..l];
let Li = &Li[f..l];
- for (&Lij, &Lxj) in Li.iter().zip(Lx.iter()) {
+ for (&Lij, &Lxj) in zip(Li, Lx) {
s += Lxj * x[Lij];
}
x[i] -= s;
@@ -672,7 +669,7 @@ fn _solve(Lp: &[usize], Li: &[usize], Lx: &[T], Dinv: &[T], b: &mut [
// compatible dimensions. For super safety or debugging purposes, there
// are also `safe` versions implemented above.
_lsolve_unsafe(Lp, Li, Lx, b);
- b.iter_mut().zip(Dinv).for_each(|(b, d)| *b *= *d);
+ zip(b.iter_mut(), Dinv).for_each(|(b, d)| *b *= *d);
_ltsolve_unsafe(Lp, Li, Lx, b);
}
@@ -694,11 +691,11 @@ fn _invperm(p: &[usize]) -> Vec {
// functions that require no memory allocations
fn _permute(x: &mut [T], b: &[T], p: &[usize]) {
- p.iter().zip(x.iter_mut()).for_each(|(p, x)| *x = b[*p]);
+ zip(p, x).for_each(|(p, x)| *x = b[*p]);
}
fn _ipermute(x: &mut [T], b: &[T], p: &[usize]) {
- p.iter().zip(b.iter()).for_each(|(p, b)| x[*p] = *b);
+ zip(p, b).for_each(|(p, b)| x[*p] = *b);
}
// Given a sparse symmetric matrix `A` (with only upper triangular entries), return
@@ -769,7 +766,7 @@ fn _permute_symmetric_inner(
// Pc is one longer than num_entries here.
Pc[0] = 0;
let mut acc = 0;
- for (Pckp1, ne) in Pc[1..].iter_mut().zip(&num_entries) {
+ for (Pckp1, ne) in zip(&mut Pc[1..], &num_entries) {
*Pckp1 = acc + ne;
acc = *Pckp1;
}
diff --git a/src/qdldl/test.rs b/src/qdldl/test.rs
index 52ceab75..80e5f87f 100644
--- a/src/qdldl/test.rs
+++ b/src/qdldl/test.rs
@@ -23,9 +23,7 @@ fn test_matrix_4x4() -> CscMatrix {
}
fn inf_norm_diff(a: &[T], b: &[T]) -> T {
- a.iter()
- .zip(b)
- .fold(T::zero(), |acc, (x, y)| T::max(acc, T::abs(*x - *y)))
+ zip(a, b).fold(T::zero(), |acc, (x, y)| T::max(acc, T::abs(*x - *y)))
}
// tests some of the private functions of QDLDL. Configured
diff --git a/src/solver/core/cones/compositecone.rs b/src/solver/core/cones/compositecone.rs
index 394dea35..53a6e376 100644
--- a/src/solver/core/cones/compositecone.rs
+++ b/src/solver/core/cones/compositecone.rs
@@ -1,6 +1,8 @@
use super::*;
+use crate::algebra::triangular_number;
use crate::solver::CoreSettings;
use std::collections::HashMap;
+use std::iter::zip;
use std::ops::Range;
// -------------------------------------
@@ -116,7 +118,7 @@ where
if cone.Hs_is_diagonal() {
nvars
} else {
- (nvars * (nvars + 1)) >> 1
+ triangular_number(nvars)
}
};
rngs.push(start..stop);
@@ -168,10 +170,6 @@ impl Cone for CompositeCone
where
T: FloatT,
{
- fn dim(&self) -> usize {
- panic!("dim() not well defined for the CompositeCone");
- }
-
fn degree(&self) -> usize {
self.degree
}
@@ -190,7 +188,7 @@ where
// we will update e <- δ .* e using return values
// from this function. default is to do nothing at all
δ.fill(T::one());
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_cones) {
let δi = &mut δ[rng.clone()];
let ei = &e[rng.clone()];
any_changed |= cone.rectify_equilibration(δi, ei);
@@ -198,10 +196,10 @@ where
any_changed
}
- fn margins(&self, z: &mut [T], pd: PrimalOrDualCone) -> (T, T) {
+ fn margins(&mut self, z: &mut [T], pd: PrimalOrDualCone) -> (T, T) {
let mut α = T::max_value();
let mut β = T::zero();
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
let (αi, βi) = cone.margins(&mut z[rng.clone()], pd);
α = T::min(α, αi);
β += βi;
@@ -210,13 +208,13 @@ where
}
fn scaled_unit_shift(&self, z: &mut [T], α: T, pd: PrimalOrDualCone) {
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_cones) {
cone.scaled_unit_shift(&mut z[rng.clone()], α, pd);
}
}
fn unit_initialization(&self, z: &mut [T], s: &mut [T]) {
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_cones) {
cone.unit_initialization(&mut z[rng.clone()], &mut s[rng.clone()]);
}
}
@@ -234,11 +232,8 @@ where
μ: T,
scaling_strategy: ScalingStrategy,
) -> bool {
- let cones = &mut self.cones;
- let rngs = &self.rng_cones;
-
let mut is_scaling_success;
- for (cone, rng) in cones.iter_mut().zip(rngs.iter()) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
let si = &s[rng.clone()];
let zi = &z[rng.clone()];
is_scaling_success = cone.update_scaling(si, zi, μ, scaling_strategy);
@@ -265,19 +260,19 @@ where
#[allow(non_snake_case)]
fn get_Hs(&self, Hsblock: &mut [T]) {
- for (cone, rng) in self.iter().zip(self.rng_blocks.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_blocks) {
cone.get_Hs(&mut Hsblock[rng.clone()]);
}
}
- fn mul_Hs(&self, y: &mut [T], x: &[T], work: &mut [T]) {
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ fn mul_Hs(&mut self, y: &mut [T], x: &[T], work: &mut [T]) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
cone.mul_Hs(&mut y[rng.clone()], &x[rng.clone()], &mut work[rng.clone()]);
}
}
fn affine_ds(&self, ds: &mut [T], s: &[T]) {
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_cones) {
let dsi = &mut ds[rng.clone()];
let si = &s[rng.clone()];
cone.affine_ds(dsi, si);
@@ -293,10 +288,7 @@ where
// nonsymmetric cones modify their internal state when
// computing the ds_shift
- let cones = &mut self.cones;
- let rngs = &self.rng_cones;
-
- for (cone, rng) in cones.iter_mut().zip(rngs) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
let shifti = &mut shift[rng.clone()];
let step_zi = &mut step_z[rng.clone()];
let step_si = &mut step_s[rng.clone()];
@@ -304,8 +296,8 @@ where
}
}
- fn Δs_from_Δz_offset(&self, out: &mut [T], ds: &[T], work: &mut [T], z: &[T]) {
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ fn Δs_from_Δz_offset(&mut self, out: &mut [T], ds: &[T], work: &mut [T], z: &[T]) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
let outi = &mut out[rng.clone()];
let dsi = &ds[rng.clone()];
let worki = &mut work[rng.clone()];
@@ -315,7 +307,7 @@ where
}
fn step_length(
- &self,
+ &mut self,
dz: &[T],
ds: &[T],
z: &[T],
@@ -326,7 +318,7 @@ where
let mut α = αmax;
// Force symmetric cones first.
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
if !cone.is_symmetric() {
continue;
}
@@ -343,7 +335,7 @@ where
α = T::min(settings.max_step_fraction, α);
}
// Force asymmetric cones last.
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&mut self.cones, &self.rng_cones) {
if cone.is_symmetric() {
continue;
}
@@ -358,7 +350,7 @@ where
fn compute_barrier(&self, z: &[T], s: &[T], dz: &[T], ds: &[T], α: T) -> T {
let mut barrier = T::zero();
- for (cone, rng) in self.iter().zip(self.rng_cones.iter()) {
+ for (cone, rng) in zip(&self.cones, &self.rng_cones) {
let zi = &z[rng.clone()];
let si = &s[rng.clone()];
let dzi = &dz[rng.clone()];
diff --git a/src/solver/core/cones/expcone.rs b/src/solver/core/cones/expcone.rs
index 1ec44936..c38b1506 100644
--- a/src/solver/core/cones/expcone.rs
+++ b/src/solver/core/cones/expcone.rs
@@ -41,16 +41,12 @@ impl Cone for ExponentialCone
where
T: FloatT,
{
- fn dim(&self) -> usize {
- 3
- }
-
fn degree(&self) -> usize {
- self.dim()
+ 3
}
fn numel(&self) -> usize {
- self.dim()
+ 3
}
fn is_symmetric(&self) -> bool {
@@ -62,7 +58,7 @@ where
true // scalar equilibration
}
- fn margins(&self, _z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
+ fn margins(&mut self, _z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
// We should never end up shifting to this cone, since
// asymmetric problems should always use unit_initialization
unreachable!();
@@ -116,7 +112,7 @@ where
Hsblock.copy_from(&self.Hs.data);
}
- fn mul_Hs(&self, y: &mut [T], x: &[T], _work: &mut [T]) {
+ fn mul_Hs(&mut self, y: &mut [T], x: &[T], _work: &mut [T]) {
self.Hs.mul(y, x);
}
@@ -135,12 +131,12 @@ where
}
}
- fn Δs_from_Δz_offset(&self, out: &mut [T], ds: &[T], _work: &mut [T], _z: &[T]) {
+ fn Δs_from_Δz_offset(&mut self, out: &mut [T], ds: &[T], _work: &mut [T], _z: &[T]) {
out.copy_from(ds);
}
fn step_length(
- &self,
+ &mut self,
dz: &[T],
ds: &[T],
z: &[T],
diff --git a/src/solver/core/cones/mod.rs b/src/solver/core/cones/mod.rs
index 6523c50c..caa31c5e 100644
--- a/src/solver/core/cones/mod.rs
+++ b/src/solver/core/cones/mod.rs
@@ -1,37 +1,35 @@
#![allow(non_snake_case)]
use crate::algebra::FloatT;
+use crate::solver::{core::ScalingStrategy, CoreSettings};
use enum_dispatch::*;
-//primitive cone types
+// the supported cone wrapper type for primitives
+// and the composite cone
+mod compositecone;
+mod supportedcone;
+// primitive cone types
mod expcone;
mod nonnegativecone;
mod powcone;
mod socone;
mod zerocone;
-
-//the supported cone wrapper type for primitives
-//and the composite cone
-mod compositecone;
-mod supportedcone;
-
-//partially specialized traits and blanket implementataions
+// partially specialized traits and blanket implementataions
mod exppow_common;
mod symmetric_common;
-//flatten all cone implementations to appear in this module
-pub use compositecone::*;
-pub use compositecone::*;
-pub use expcone::*;
+//re-export everything to appear as one module
use exppow_common::*;
-pub use nonnegativecone::*;
-pub use powcone::*;
-pub use socone::*;
-pub use supportedcone::*;
-pub use symmetric_common::*;
-pub use zerocone::*;
+pub use {
+ compositecone::*, expcone::*, nonnegativecone::*, powcone::*, socone::*, supportedcone::*,
+ symmetric_common::*, zerocone::*,
+};
-use crate::solver::{core::ScalingStrategy, CoreSettings};
+// only use PSD cones with SDP/Blas enabled
+#[cfg(feature = "sdp")]
+mod psdtrianglecone;
+#[cfg(feature = "sdp")]
+pub use psdtrianglecone::*;
// marker for primal / dual distinctions
#[derive(Eq, PartialEq, Clone, Debug, Copy)]
@@ -46,7 +44,6 @@ where
T: FloatT,
{
// functions relating to basic sizing
- fn dim(&self) -> usize;
fn degree(&self) -> usize;
fn numel(&self) -> usize;
@@ -65,7 +62,7 @@ where
// cones this will just be β = max(0.,α), but for cones that
// are composites (e.g. the R_n^+), it is the sum of all of
// the positive margin terms.
- fn margins(&self, z: &mut [T], pd: PrimalOrDualCone) -> (T, T);
+ fn margins(&mut self, z: &mut [T], pd: PrimalOrDualCone) -> (T, T);
// functions relating to unit vectors and cone initialization
fn scaled_unit_shift(&self, z: &mut [T], α: T, pd: PrimalOrDualCone);
@@ -82,7 +79,7 @@ where
// : μH(s) for nonsymmetric cones
fn Hs_is_diagonal(&self) -> bool;
fn get_Hs(&self, Hsblock: &mut [T]);
- fn mul_Hs(&self, y: &mut [T], x: &[T], work: &mut [T]);
+ fn mul_Hs(&mut self, y: &mut [T], x: &[T], work: &mut [T]);
// ---------------------------------------------------------
// Linearized centrality condition functions
@@ -131,11 +128,11 @@ where
// ---------------------------------------------------------
fn affine_ds(&self, ds: &mut [T], s: &[T]);
fn combined_ds_shift(&mut self, shift: &mut [T], step_z: &mut [T], step_s: &mut [T], σμ: T);
- fn Δs_from_Δz_offset(&self, out: &mut [T], ds: &[T], work: &mut [T], z: &[T]);
+ fn Δs_from_Δz_offset(&mut self, out: &mut [T], ds: &[T], work: &mut [T], z: &[T]);
// Find the maximum step length in some search direction
fn step_length(
- &self,
+ &mut self,
dz: &[T],
ds: &[T],
z: &[T],
diff --git a/src/solver/core/cones/nonnegativecone.rs b/src/solver/core/cones/nonnegativecone.rs
index 0a21be71..55bb4f8d 100644
--- a/src/solver/core/cones/nonnegativecone.rs
+++ b/src/solver/core/cones/nonnegativecone.rs
@@ -3,6 +3,7 @@ use crate::{
algebra::*,
solver::{core::ScalingStrategy, CoreSettings},
};
+use std::iter::zip;
// -------------------------------------
// Nonnegative Cone
@@ -31,16 +32,12 @@ impl Cone for NonnegativeCone
where
T: FloatT,
{
- fn dim(&self) -> usize {
- self.dim
- }
-
fn degree(&self) -> usize {
- self.dim()
+ self.dim
}
fn numel(&self) -> usize {
- self.dim()
+ self.dim
}
fn is_symmetric(&self) -> bool {
@@ -52,7 +49,7 @@ where
false
}
- fn margins(&self, z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
+ fn margins(&mut self, z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
let α = z.minimum();
let β = z.iter().fold(T::zero(), |β, &zi| β + T::max(zi, T::zero()));
(α, β)
@@ -78,10 +75,7 @@ where
_μ: T,
_scaling_strategy: ScalingStrategy,
) -> bool {
- let λw = self.λ.iter_mut().zip(self.w.iter_mut());
- let sz = s.iter().zip(z.iter());
-
- for ((λ, w), (s, z)) in λw.zip(sz) {
+ for ((λ, w), (s, z)) in zip(zip(&mut self.λ, &mut self.w), zip(s, z)) {
*λ = T::sqrt((*s) * (*z));
*w = T::sqrt((*s) / (*z));
}
@@ -95,21 +89,21 @@ where
fn get_Hs(&self, Hsblock: &mut [T]) {
assert_eq!(self.w.len(), Hsblock.len());
- for (blki, &wi) in Hsblock.iter_mut().zip(self.w.iter()) {
+ for (blki, &wi) in zip(Hsblock, &self.w) {
*blki = wi * wi;
}
}
- fn mul_Hs(&self, y: &mut [T], x: &[T], _work: &mut [T]) {
+ fn mul_Hs(&mut self, y: &mut [T], x: &[T], _work: &mut [T]) {
//NB : seemingly sensitive to order of multiplication
for (yi, (&wi, &xi)) in y.iter_mut().zip(self.w.iter().zip(x)) {
- *yi = wi * (wi * xi);
+ *yi = wi * (wi * xi)
}
}
fn affine_ds(&self, ds: &mut [T], _s: &[T]) {
assert_eq!(self.λ.len(), ds.len());
- for (dsi, &λi) in ds.iter_mut().zip(self.λ.iter()) {
+ for (dsi, &λi) in zip(ds, &self.λ) {
*dsi = λi * λi;
}
}
@@ -119,14 +113,14 @@ where
self._combined_ds_shift_symmetric(dz, step_z, step_s, σμ);
}
- fn Δs_from_Δz_offset(&self, out: &mut [T], ds: &[T], _work: &mut [T], z: &[T]) {
- for (outi, (&dsi, &zi)) in out.iter_mut().zip(ds.iter().zip(z)) {
+ fn Δs_from_Δz_offset(&mut self, out: &mut [T], ds: &[T], _work: &mut [T], z: &[T]) {
+ for (outi, (&dsi, &zi)) in zip(out, zip(ds, z)) {
*outi = dsi / zi;
}
}
fn step_length(
- &self,
+ &mut self,
dz: &[T],
ds: &[T],
z: &[T],
@@ -157,9 +151,7 @@ where
assert_eq!(dz.len(), z.len());
assert_eq!(ds.len(), s.len());
let mut barrier = T::zero();
- let s_ds = s.iter().zip(ds.iter());
- let z_dz = z.iter().zip(dz.iter());
- for ((&s, &ds), (&z, &dz)) in s_ds.zip(z_dz) {
+ for ((&s, &ds), (&z, &dz)) in zip(zip(s, ds), zip(z, dz)) {
let si = s + α * ds;
let zi = z + α * dz;
barrier += (si * zi).logsafe();
@@ -176,11 +168,11 @@ impl SymmetricCone for NonnegativeCone
where
T: FloatT,
{
- fn λ_inv_circ_op(&self, x: &mut [T], z: &[T]) {
- self.inv_circ_op(x, &self.λ, z);
+ fn λ_inv_circ_op(&mut self, x: &mut [T], z: &[T]) {
+ _inv_circ_op(x, &self.λ, z);
}
- fn mul_W(&self, _is_transpose: MatrixShape, y: &mut [T], x: &[T], α: T, β: T) {
+ fn mul_W(&mut self, _is_transpose: MatrixShape, y: &mut [T], x: &[T], α: T, β: T) {
assert_eq!(y.len(), x.len());
assert_eq!(y.len(), self.w.len());
for i in 0..y.len() {
@@ -188,7 +180,7 @@ where
}
}
- fn mul_Winv(&self, _is_transpose: MatrixShape, y: &mut [T], x: &[T], α: T, β: T) {
+ fn mul_Winv(&mut self, _is_transpose: MatrixShape, y: &mut [T], x: &[T], α: T, β: T) {
assert_eq!(y.len(), x.len());
assert_eq!(y.len(), self.w.len());
for i in 0..y.len() {
@@ -205,19 +197,32 @@ impl JordanAlgebra for NonnegativeCone
where
T: FloatT,
{
- fn circ_op(&self, x: &mut [T], y: &[T], z: &[T]) {
- let yz = y.iter().zip(z.iter());
+ fn circ_op(&mut self, x: &mut [T], y: &[T], z: &[T]) {
+ _circ_op(x, y, z);
+ }
- for (x, (y, z)) in x.iter_mut().zip(yz) {
- *x = (*y) * (*z);
- }
+ fn inv_circ_op(&mut self, x: &mut [T], y: &[T], z: &[T]) {
+ _inv_circ_op(x, y, z);
}
+}
- fn inv_circ_op(&self, x: &mut [T], y: &[T], z: &[T]) {
- let yz = y.iter().zip(z.iter());
+// circ ops don't use self for this cone, so put the actual
+// implementations outside so that they can be called by
+// other functions with entering borrow check hell
+fn _circ_op(x: &mut [T], y: &[T], z: &[T])
+where
+ T: FloatT,
+{
+ for (x, (y, z)) in zip(x, zip(y, z)) {
+ *x = (*y) * (*z);
+ }
+}
- for (x, (y, z)) in x.iter_mut().zip(yz) {
- *x = (*z) / (*y);
- }
+fn _inv_circ_op(x: &mut [T], y: &[T], z: &[T])
+where
+ T: FloatT,
+{
+ for (x, (y, z)) in zip(x, zip(y, z)) {
+ *x = (*z) / (*y);
}
}
diff --git a/src/solver/core/cones/powcone.rs b/src/solver/core/cones/powcone.rs
index 0cd67619..59a92a60 100644
--- a/src/solver/core/cones/powcone.rs
+++ b/src/solver/core/cones/powcone.rs
@@ -43,16 +43,12 @@ impl Cone for PowerCone
where
T: FloatT,
{
- fn dim(&self) -> usize {
- 3
- }
-
fn degree(&self) -> usize {
- self.dim()
+ 3
}
fn numel(&self) -> usize {
- self.dim()
+ 3
}
fn is_symmetric(&self) -> bool {
@@ -64,7 +60,7 @@ where
true // scalar equilibration
}
- fn margins(&self, _z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
+ fn margins(&mut self, _z: &mut [T], _pd: PrimalOrDualCone) -> (T, T) {
// We should never end up shifting to this cone, since
// asymmetric problems should always use unit_initialization
unreachable!();
@@ -119,7 +115,7 @@ where
Hsblock.copy_from(&self.Hs.data);
}
- fn mul_Hs(&self, y: &mut [T], x: &[T], _work: &mut [T]) {
+ fn mul_Hs(&mut self, y: &mut [T], x: &[T], _work: &mut [T]) {
self.Hs.mul(y, x);
}
@@ -138,12 +134,12 @@ where
}
}
- fn Δs_from_Δz_offset(&self, out: &mut [T], ds: &[T], _work: &mut [T], _z: &[T]) {
+ fn Δs_from_Δz_offset(&mut self, out: &mut [T], ds: &[T], _work: &mut [T], _z: &[T]) {
out.copy_from(ds);
}
fn step_length(
- &self,
+ &mut self,
dz: &[T],
ds: &[T],
z: &[T],
diff --git a/src/solver/core/cones/psdtrianglecone.rs b/src/solver/core/cones/psdtrianglecone.rs
new file mode 100644
index 00000000..0be13fb0
--- /dev/null
+++ b/src/solver/core/cones/psdtrianglecone.rs
@@ -0,0 +1,504 @@
+use super::*;
+use crate::{
+ algebra::*,
+ solver::{core::ScalingStrategy, CoreSettings},
+};
+
+// ------------------------------------
+// Positive Semidefinite Cone (Scaled triangular form)
+// ------------------------------------
+
+pub struct PSDConeWork {
+ cholS: CholeskyEngine,
+ cholZ: CholeskyEngine,
+ SVD: SVDEngine,
+ Eig: EigEngine,
+ λ: Vec,
+ Λisqrt: Vec,
+ R: Matrix,
+ Rinv: Matrix,
+ kronRR: Matrix