Increase error tolerance, pull in sleipnir conditioning
mcm001 committed Jan 9, 2025
1 parent 7c7bd0e commit 398a078
Showing 3 changed files with 86 additions and 42 deletions.
@@ -52,6 +52,8 @@
#include "../generate/casadi_meme_9_tags_False.h"
#include "../generate/casadi_meme_9_tags_True.h"

+constexpr bool VERBOSE = false;
+
struct Problem {
int numTags;
bool headingFree;
@@ -180,7 +182,8 @@ void IterativeRefinement(const Solver& solver,
}

template <int nState>
-wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(
+wpi::expected<casadi_meme::RobotStateMat, sleipnir::SolverExitCondition>
+do_optimization(
int nTags, casadi_meme::CameraCalibration cameraCal,
// Note that casadi is column major, apparently
Eigen::Matrix<casadi_real, 4, 4, Eigen::ColMajor> robot2camera,
@@ -190,18 +193,16 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(
point_observations) {
if (field2points.cols() != (nTags * 4) ||
point_observations.cols() != (nTags * 4)) {
fmt::println("Got unexpected num cols!");
return wpi::unexpected{false};
if constexpr (VERBOSE) fmt::println("Got unexpected num cols!");
// TODO find a new error code
return wpi::unexpected{
sleipnir::SolverExitCondition::kNonfiniteInitialCostOrConstraints};
}

-#if 0
+  if constexpr (VERBOSE) {
fmt::println("----------------------------------");
fmt::println("Camera cal {} {} {} {}",
cameraCal.fx,
cameraCal.fy,
cameraCal.cx,
cameraCal.cy
);
fmt::println("Camera cal {} {} {} {}", cameraCal.fx, cameraCal.fy,
cameraCal.cx, cameraCal.cy);
fmt::println("{} tags", nTags);
fmt::println("nstate {}", nState);

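The hunks above replace #if 0 debug blocks with if constexpr (VERBOSE). A minimal sketch of the pattern, with illustrative names and assuming {fmt} is available as it is in this file: the disabled branch is still parsed and type-checked, so it cannot bit-rot the way #if 0 code can, yet it emits no code when VERBOSE is false.

#include <fmt/format.h>

constexpr bool VERBOSE = false;

void LogIteration(int iter, double cost) {
  // Compiled and type-checked, but discarded at compile time
  // when VERBOSE is false.
  if constexpr (VERBOSE) {
    fmt::println("iter {}: cost {}", iter, cost);
  }
}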
@@ -210,11 +211,12 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(
std::cout << "field2pt:\n" << field2points << std::endl;
std::cout << "observations:\n" << point_observations << std::endl;
fmt::println("---------^^^^^^^^---------");
-#endif
+  }

auto problemOpt = createProblem(nTags, nState == 3);
if (!problemOpt) {
-    return wpi::unexpected{false};
+    return wpi::unexpected{
+        sleipnir::SolverExitCondition::kNonfiniteInitialCostOrConstraints};
}
ProblemState<nState> pState{robot2camera, field2points, point_observations,
cameraCal, *problemOpt};
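This is the central change of the commit: failure paths now return a sleipnir::SolverExitCondition instead of a bare bool. A hedged sketch of the wpi::expected pattern (wpi::expected mirrors std::expected; Solve is a made-up stand-in, and the enumerator is one of those used in this diff):

#include <sleipnir/optimization/SolverExitCondition.hpp>
#include <wpi/expected>

wpi::expected<double, sleipnir::SolverExitCondition> Solve(bool feasible) {
  if (!feasible) {
    // The error channel now says *why* the solve failed
    return wpi::unexpected{
        sleipnir::SolverExitCondition::kLocallyInfeasible};
  }
  return 42.0;  // the value channel carries the result
}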
@@ -228,30 +230,48 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(
FullStateMat x_full = x_guess;
auto x = x_full.template head<nState>();

-  // Sleipnir's delta_I caching algo from
+  // Sleipnir's delta_I caching algo and Newton.cpp inspiration from
// https://github.com/SleipnirGroup/Sleipnir/blob/5af8519f268a8075e245bb7cd411a81e1598f521/src/optimization/RegularizedLDLT.hpp#L163
// licensed under BSD 3-Clause

+  /// The value of δ from the previous iteration - 1e-4 is a sane guess
+  /// TUNABLE
+  double δ = 1e-4 * 2.0;
+
+  constexpr double ERROR_TOL = 1e-4;
+
for (int iter = 0; iter < 100; iter++) {
auto iter_start = wpi::Now();

-    HessianMat H = pState.calculateHessJ(x_full);
+    // Check for diverging iterates
+    if (x_full.template lpNorm<Eigen::Infinity>() > 1e20 ||
+        !x_full.allFinite()) {
+      return wpi::unexpected{sleipnir::SolverExitCondition::kDivergingIterates};
+    }

GradMat g = pState.calculateGradJ(x_full);

+    // If our previous step found an x such that grad(J) is acceptable, we're done
+    if (g.norm() < ERROR_TOL) {
+      // Done!
+      fmt::println("{}: Exiting due to convergence (‖∇J‖={})", iter, g.norm());
+      break;
+    }
+
+    HessianMat H = pState.calculateHessJ(x_full);

/// Regularization. If the Hessian inertia is already OK, don't adjust

auto H_ldlt = H.ldlt();
if (H_ldlt.info() != Eigen::Success) {
std::cerr << "LDLT decomp failed! H=" << std::endl << H << std::endl;
-      return wpi::unexpected{false};
+      return wpi::unexpected{sleipnir::SolverExitCondition::kLocallyInfeasible};
}

// Make sure H is positive definite (all eigenvalues are > 0)
int i_reg{0};
-    HessianMat H_reg = H; // keep track of our regularized H TODO - is this valid?
+    HessianMat H_reg =
+        H;  // keep track of our regularized H TODO - is this valid?
if ((H_ldlt.vectorD().array() <= 0.0).any()) {
    // If the Hessian wasn't regularized in a previous iteration, start at a
// small value of δ. Otherwise, attempt a δ half as big as the previous
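This block is the "sleipnir conditioning" from the commit title: when LDLᵀ reports non-positive pivots, the Hessian is refactored as H + δI with δ doubling until the inertia is correct, and the δ that worked is cached to seed the next iteration. A simplified, self-contained sketch (Eigen only; names are hypothetical, and the real code skips regularization entirely when H is already positive definite):

#include <Eigen/Dense>

// Returns a δ such that H + δI is positive definite, or -1.0 on failure.
// Seeding from half the previous iteration's δ is the caching trick:
// consecutive Newton steps tend to need similar regularization.
double Regularize(const Eigen::MatrixXd& H, double deltaPrev) {
  double delta = deltaPrev / 2.0;
  for (int i = 0; i < 50; ++i) {
    Eigen::MatrixXd Hreg =
        H + delta * Eigen::MatrixXd::Identity(H.rows(), H.cols());
    Eigen::LDLT<Eigen::MatrixXd> ldlt{Hreg};
    if (ldlt.info() == Eigen::Success &&
        (ldlt.vectorD().array() > 0.0).all()) {
      return delta;  // inertia OK; remember this δ for the next iteration
    }
    delta *= 2.0;  // grow the perturbation and retry
    if (delta > 1e20) {
      break;  // same give-up threshold as the code above
    }
  }
  return -1.0;
}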
@@ -269,7 +289,8 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(

if (H_ldlt.info() != Eigen::Success) {
std::cerr << "LDLT decomp failed! H=" << std::endl << H << std::endl;
-        return wpi::unexpected{false};
+        return wpi::unexpected{
+            sleipnir::SolverExitCondition::kLocallyInfeasible};
}

// If our eigenvalues aren't positive definite, pick a new δ for next
@@ -279,7 +300,8 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(

// If the Hessian perturbation is too high, report failure
if (δ > 1e20) {
-        return wpi::unexpected{false};
+        return wpi::unexpected{
+            sleipnir::SolverExitCondition::kLocallyInfeasible};
}
} else {
// Done!
@@ -290,57 +312,75 @@ wpi::expected<casadi_meme::RobotStateMat, bool> do_optimization(
}

if (i_reg == MAX_REG_STEPS) {
-      return wpi::unexpected{false};
+      return wpi::unexpected{
+          sleipnir::SolverExitCondition::kLocallyInfeasible};
}
} else {
// std::printf("Already regularized\n");
}

// Solve for p_x, and refine our solution
-    auto Hsolver = H.fullPivLu();
+    auto Hsolver = H_ldlt;  // H.fullPivLu();
StateMat p_x = Hsolver.solve(-g);
// IterativeRefinement<decltype(Hsolver), nState>(Hsolver, H_reg, p_x, -g);

casadi_real old_cost = pState.calculateJ(x_full);
-    double alpha = 1.0;
+    constexpr double α_max = 1.0;
+    double alpha = α_max;

+    // Iterate until our chosen trial_x decreases our cost
    int alpha_refinement{0};
+    FullStateMat trial_x = x_full;
    for (alpha_refinement = 0; alpha_refinement < 100; alpha_refinement++) {
-      FullStateMat trial_x = x_full;
trial_x.template head<nState>() = x + alpha * p_x;

casadi_real new_cost = pState.calculateJ(trial_x);

+      // If f(xₖ + αpₖˣ) isn't finite, reduce step size immediately
+      if (!std::isfinite(new_cost)) {
+        // Reduce step size
+        alpha *= 0.5;
+        continue;
+      }

// Make sure we see an improvement
if (new_cost < old_cost) {
// Step accepted - update x
x = trial_x.template head<nState>();
break;
} else {
alpha *= 0.5;
}
}

-      if (g.norm() < 1e-8) {
-        // Done!
-        break;
+        // Safety factor for the minimal step size
+        constexpr double α_min_frac = 0.05;
+        constexpr double γConstraint = 1e-5;
+
+        // If our step size shrank too much, report local infeasibility
+        if (alpha < α_min_frac * γConstraint) {
+          return wpi::unexpected{
+              sleipnir::SolverExitCondition::kLocallyInfeasible};
+        }
}
}

auto iter_end = wpi::Now();
-    fmt::println(
-        "{}: {} uS, ‖∇J‖={}, α={} ({} refinement steps), {} regularization "
-        "steps",
-        iter, iter_end - iter_start, g.norm(), alpha, alpha_refinement, i_reg);
-    fmt::println("∇J={}", g);
-    fmt::println("H={}", H);
-    fmt::println("p_x={}", p_x);
-    fmt::println("|Hp_x + ∇f|₂={}", (H * p_x + g).norm());
+    if constexpr (VERBOSE) {
+      fmt::println(
+          "{}: {} uS, ‖∇J‖={}, α={} ({} refinement steps), {} regularization "
+          "steps",
+          iter, iter_end - iter_start, g.norm(), alpha, alpha_refinement,
+          i_reg);
+      // fmt::println("∇J={}", g);
+      // fmt::println("H={}", H);
+      // fmt::println("p_x={}", p_x);
+      // fmt::println("|Hp_x + ∇f|₂={}", (H * p_x + g).norm());
+    }
}
fmt::println("======================");
if constexpr (VERBOSE) fmt::println("======================");

return x_full;
}

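The accept/reject loop in do_optimization is a plain backtracking line search: halve α until the trial cost is both finite and decreasing, and report local infeasibility if α collapses below α_min_frac * γConstraint. A standalone sketch with a scalar stand-in for the cost function:

#include <cmath>
#include <optional>

// Backtracking line search: shrink alpha until f(x + alpha * p) improves.
// Returns the accepted alpha, or std::nullopt if the step size collapses.
std::optional<double> BacktrackingSearch(double (*f)(double), double x,
                                         double p) {
  const double oldCost = f(x);
  double alpha = 1.0;                        // α_max
  constexpr double kAlphaMin = 0.05 * 1e-5;  // α_min_frac * γConstraint
  for (int i = 0; i < 100; ++i) {
    const double newCost = f(x + alpha * p);
    // Reject non-finite or non-decreasing trial costs
    if (!std::isfinite(newCost) || newCost >= oldCost) {
      alpha *= 0.5;
      if (alpha < kAlphaMin) {
        return std::nullopt;  // step shrank too far
      }
      continue;
    }
    return alpha;  // accepted
  }
  return std::nullopt;
}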
-wpi::expected<casadi_meme::RobotStateMat, bool>
+wpi::expected<casadi_meme::RobotStateMat, sleipnir::SolverExitCondition>
casadi_meme::do_optimization_heading_free(
int nTags, casadi_meme::CameraCalibration cameraCal,
// Note that casadi is column major, apparently
@@ -353,7 +393,7 @@ casadi_meme::do_optimization_heading_free(
field2points, point_observations);
}

-wpi::expected<casadi_meme::RobotStateMat, bool>
+wpi::expected<casadi_meme::RobotStateMat, sleipnir::SolverExitCondition>
casadi_meme::do_optimization_heading_fixed(
int nTags, casadi_meme::CameraCalibration cameraCal,
// Note that casadi is column major, apparently
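For callers, the new signature means branching on an exit condition rather than a bool. A hedged usage sketch (HandleResult is hypothetical; sleipnir::ToMessage is assumed to be the library's enum-to-string helper from SolverExitCondition.hpp):

#include <fmt/format.h>

void HandleResult(
    const wpi::expected<casadi_meme::RobotStateMat,
                        sleipnir::SolverExitCondition>& result) {
  if (result.has_value()) {
    // Success: *result is the 3x1 robot state estimate
    fmt::println("pose: [{}, {}, {}]", (*result)(0), (*result)(1),
                 (*result)(2));
  } else {
    fmt::println("solve failed: {}", sleipnir::ToMessage(result.error()));
  }
}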
@@ -18,6 +18,7 @@
#pragma once

#include <Eigen/Core>
+#include <sleipnir/optimization/SolverExitCondition.hpp>
#include <wpi/expected>

namespace casadi_meme {
@@ -39,7 +40,8 @@ using RobotStateMat = Eigen::Matrix<casadi_real, 3, 1>;
* to this. The number of columns in field2points and point_observations must be
* exactly 4x nTags.
*/
-wpi::expected<RobotStateMat, bool> do_optimization_heading_free(
+wpi::expected<RobotStateMat, sleipnir::SolverExitCondition>
+do_optimization_heading_free(
int nTags, CameraCalibration cameraCal,
// Note that casadi is column major, apparently
Eigen::Matrix<casadi_real, 4, 4, Eigen::ColMajor> robot2camera,
@@ -52,7 +54,8 @@ wpi::expected<RobotStateMat, bool> do_optimization_heading_free(
* this. The number of columns in field2points and point_observations must be
* exactly 4x nTags.
*/
-wpi::expected<RobotStateMat, bool> do_optimization_heading_fixed(
+wpi::expected<RobotStateMat, sleipnir::SolverExitCondition>
+do_optimization_heading_fixed(
int nTags, CameraCalibration cameraCal,
// Note that casadi is column major, apparently
Eigen::Matrix<casadi_real, 4, 4, Eigen::ColMajor> robot2camera,
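Both declarations share the documented precondition that field2points and point_observations have exactly 4 x nTags columns (four corners per tag). A tiny caller-side guard mirroring the check inside do_optimization (dynamic-sized double matrices are an assumption here; the real types use casadi_real):

#include <Eigen/Core>

bool ColumnCountsOk(int nTags, const Eigen::MatrixXd& field2points,
                    const Eigen::MatrixXd& pointObservations) {
  // One column per tag corner, four corners per tag
  return field2points.cols() == nTags * 4 &&
         pointObservations.cols() == nTags * 4;
}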
3 changes: 2 additions & 1 deletion photon-targeting/src/main/native/jni/CasadiMemeJNI.cpp
Original file line number Diff line number Diff line change
@@ -87,7 +87,8 @@ Java_org_photonvision_jni_CasadiMeme_do_1optimization
std::cout << "observations:\n" << pointObservationsMat << std::endl;
#endif

-  wpi::expected<casadi_meme::RobotStateMat, bool> result;
+  wpi::expected<casadi_meme::RobotStateMat, sleipnir::SolverExitCondition>
+      result;
if (headingFree) {
result = casadi_meme::do_optimization_heading_free(
nTags, cameraCal_, robot2cameraMat, xGuessMat, field2pointsMat,