From f7418509eda9cca7daf3f617bf6175d974a2e7b1 Mon Sep 17 00:00:00 2001
From: duembgen
Date: Thu, 14 Sep 2023 11:13:12 -0400
Subject: [PATCH 1/9] Update README.md

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b26d48e..67fc96a 100644
--- a/README.md
+++ b/README.md
@@ -27,4 +27,5 @@ conda env create -f environment.yml
 
 ## Author
 
-Frederike Dümbgen
+- Frederike Dümbgen
+- Connor Holmes

From 2934d239a38c09b67bf72b34e8edd2d0aa6203c6 Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 14:36:16 -0500
Subject: [PATCH 2/9] Fix tests

---
 _test/test_parameters.py      | 146 +++-------------------------------
 _test/test_solvers.py         |  42 +++++-----
 _test/test_special_cases.py   |   3 +-
 _test/test_state_lifter.py    |  20 +++--
 _test/test_tools.py           |   7 +-
 certifiable-tools             |   2 +-
 lifters/poly_lifters.py       |  17 ++--
 lifters/robust_pose_lifter.py |  11 +--
 lifters/state_lifter.py       |  19 ++---
 lifters/stereo1d_lifter.py    |   6 +-
 lifters/stereo2d_problem.py   |  25 ++++--
 poly_matrix                   |   2 +-
 utils/constraint.py           |   2 +-
 utils/geometry.py             |   3 +-
 14 files changed, 93 insertions(+), 212 deletions(-)

diff --git a/_test/test_parameters.py b/_test/test_parameters.py
index e27d2fa..5f09cda 100644
--- a/_test/test_parameters.py
+++ b/_test/test_parameters.py
@@ -5,9 +5,6 @@ from lifters.stereo2d_lifter import Stereo2DLifter
 from poly_matrix.poly_matrix import PolyMatrix
 
-INCREMENTAL = True
-NORMALIZE = True
-
 
 def test_canonical_operations():
     n_landmarks = 1  # z_0 and z_1
@@ -17,12 +14,12 @@ def test_canonical_operations():
     # fmt: off
     Ai_sub = np.array(
         [
-            [1, 0, 0, 0, 0, 0],
-            [0, 0, .5, 0, 0, 0],
-            [0, .5, 0, 0, .5, 0],
-            [0, 0, 0, 1, 0, 0],
-            [0, 0, .5, 0, 0, 0],
-            [0, 0, 0, 0, 0, 0],
+            [1, 0, 0, 0, 0, 0],
+            [0, 0,.5, 0, 0, 0],
+            [0,.5, 0, 0,.5, 0],
+            [0, 0, 0, 1, 0, 0],
+            [0, 0,.5, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0],
         ]
     )
     Ai_poly = PolyMatrix(symmetric=True)
@@ -44,39 +41,6 @@ def test_learned_constraints(d=2, param_level="ppT"):
-    n_landmarks = 2  # z_0 and z_1
-    if d == 1:
-        lifter = Stereo1DLifter(n_landmarks=n_landmarks, param_level=param_level)
-    elif d == 2:
-        lifter = Stereo2DLifter(
-            n_landmarks=n_landmarks, param_level=param_level, level="urT"
-        )
-    else:
-        raise ValueError(d)
-
-    var_subset = ["l", "z_0", "z_1"]
-    label_dict = lifter.var_dict_row(var_subset)
-
-    basis_row_list = lifter.get_basis_list(var_subset, plot=False)
-    basis_list = [
-        basis_row.get_matrix((["l"], label_dict)) for basis_row in basis_row_list
-    ]
-    A_learned = lifter.generate_matrices(basis_list, var_dict=var_subset)
-
-    # first, test that the learned constraints actually work on the original setup.
-    np.random.seed(0)
-    lifter.test_constraints(A_learned, errors="print")
-
-    # then, with parameters, we can regenerate new learned variables for each new random setup.
-    for i in range(0, 10):
-        print(f"---- {i} -----")
-        np.random.seed(i)
-        lifter.generate_random_setup()
-        A_learned = lifter.generate_matrices(basis_list, var_dict=var_subset)
-        lifter.test_constraints(A_learned, errors="print")
-
-
-def test_learned_constraints_augment(d=2, param_level="ppT"):
     n_landmarks = 2  # z_0 and z_1
     if d == 1:
         lifter = Stereo1DLifter(n_landmarks=n_landmarks, param_level="p")
     elif d == 2:
         lifter = Stereo2DLifter(
             n_landmarks=n_landmarks, param_level=param_level, level="urT"
         )
@@ -87,46 +51,18 @@ def test_learned_constraints_augment(d=2, param_level="ppT"):
     else:
         raise ValueError(d)
 
-    if INCREMENTAL:
-        var_subset = ["l", "z_0", "z_1"]
-        label_dict = lifter.var_dict_row(var_subset)
-
-        basis_list = lifter.get_basis_list(var_subset, plot=False)
-        basis_list_all = lifter.apply_templates(basis_list)
-        basis_poly_all = PolyMatrix.init_from_row_list(basis_list_all)
-
-        basis_learned = basis_poly_all.get_matrix(
-            variables=(basis_poly_all.variable_dict_i, label_dict)
-        )
-        A_learned = lifter.generate_matrices(basis_list, var_dict=var_subset)
-    else:
-        var_subset = list(lifter.var_dict.keys())
-        label_dict = lifter.var_dict_row(var_subset)
+    A_learned = lifter.get_A_learned()
 
-        A_learned, basis_poly = lifter.get_A_learned(normalize=NORMALIZE)
-        basis_learned = basis_poly.get_matrix(
-            variables=(basis_poly.variable_dict_i, label_dict)
-        )
-
-    # first, test that the learned constraints actually work on the original setup.
     np.random.seed(0)
     lifter.test_constraints(A_learned, errors="raise")
 
-    # then, with parameters, we can regenerate new learned variables for each new random setup.
-    for i in range(10):
-        np.random.seed(i)
-        lifter.generate_random_setup()
-        A_learned = lifter.generate_matrices(basis_learned, var_dict=var_subset)
-
-        lifter.test_constraints(A_learned, errors="print")
-
 
 def test_b_to_a():
     n_landmarks = 1  # z_0 only
     lifter = Stereo2DLifter(n_landmarks=n_landmarks, param_level="p", level="no")
 
-    for var_subset in [("l", "x"), ("l", "x", "z_0")]:
+    for var_subset in [("h", "x"), ("h", "x", "z_0")]:
         Y = lifter.generate_Y(var_subset=var_subset)
         basis_new, S = lifter.get_basis(Y)
         for i, bi_sub in enumerate(basis_new[:10, :]):
@@ -151,70 +87,12 @@ def test_b_to_a():
             ai_sub = lifter.get_reduced_a(bi_sub, var_subset=var_subset)
             assert abs(ai_sub @ x_sub) < 1e-10
 
-            # put back in all other variables.
-
-            bi_all = lifter.zero_pad_subvector(
-                bi_sub, var_subset, target_subset=lifter.var_dict
-            )
-            # bi_all = lifter.get_vector_dense(bi_poly)
-
-            if var_subset == tuple(lifter.var_dict.keys()):
-                try:
-                    np.testing.assert_allclose(bi_all, bi_sub)
-                except:
-                    fig, axs = plt.subplots(2, 1)
-                    axs[0].matshow(bi_sub[None, :])
-                    axs[0].set_title("bi sub")
-                    axs[1].matshow(bi_all[None, :])
-                    axs[1].set_title("bi all")
-                    raise
-
-            # generate the full matrix, placing the subvar in the correct place.
-            Ai = lifter.get_mat(ai_sub, var_dict=var_dict)
-            ai_all_test = lifter.get_vec(Ai)
-
-            # generate the full matrix directly from the test vector
-            ai_all = lifter.get_reduced_a(bi_all)
-            Ai_test = lifter.get_mat(ai_all)
-            try:
-                np.testing.assert_allclose(ai_all_test, ai_all)
-            except:
-                fig, axs = plt.subplots(2, 1)
-                axs[0].matshow(ai_all[None, :])
-                axs[0].set_title("ai all")
-                axs[1].matshow(ai_all_test[None, :])
-                axs[1].set_title("ai all test")
-                raise
-
-            try:
-                np.testing.assert_allclose(Ai.toarray(), Ai_test)
-            except:
-                fig, axs = plt.subplots(1, 2)
-                axs[0].matshow(Ai.toarray())
-                axs[0].set_title("Ai")
-                axs[1].matshow(Ai_test)
-                axs[1].set_title("Ai test")
-                raise
-
-            assert len(bi_all) == lifter.get_dim_X() * lifter.get_dim_P()
-
-            x = lifter.get_x()
-            x_all = lifter.get_vec(np.outer(x, x))
-            x_all_aug = lifter.augment_using_parameters(x_all)
-
-            # test that x_all_aug @ bi_all holds (x includes parameters)
-            assert abs(bi_all @ x_all_aug) < 1e-10
-
-            ai_all = lifter.get_reduced_a(bi_all)
-            # test that ai_all @ x_all holds.
-            assert abs(ai_all @ x_all) < 1e-10
-
 
 def test_zero_padding():
     n_landmarks = 1  # z_0 only
     lifter = Stereo2DLifter(n_landmarks=n_landmarks, param_level="p", level="no")
 
-    for var_subset in [("l", "x"), ("l", "x", "z_0")]:
+    for var_subset in [("h", "x"), ("h", "x", "z_0")]:
         var_dict = lifter.get_var_dict(var_subset)
         # get new patterns for this subset.
         Y = lifter.generate_Y(var_subset=var_subset)
@@ -233,7 +111,7 @@ def test_zero_padding():
             # Ai = lifter.get_mat(lifter.get_reduced_a(bi, lifter.var_dict))
             try:
                 lifter.test_constraints([Ai])
-            except:
+            except AssertionError:
                 b_poly_test = lifter.convert_b_to_polyrow(bi_sub, var_subset)
                 print(b_poly_test)
@@ -250,8 +128,6 @@ def test_zero_padding():
 if __name__ == "__main__":
     test_zero_padding()
     test_b_to_a()
-    test_learned_constraints_augment(d=1, param_level="p")
-    test_learned_constraints_augment(d=2, param_level="ppT")
-    test_learned_constraints(d=2, param_level="ppT")
+    test_learned_constraints()
     test_canonical_operations()
     print("all tests passed")
diff --git a/_test/test_solvers.py b/_test/test_solvers.py
index e585ced..45c4a08 100644
--- a/_test/test_solvers.py
+++ b/_test/test_solvers.py
@@ -1,12 +1,16 @@
-import matplotlib.pylab as plt
 import numpy as np
 
 from lifters.poly_lifters import PolyLifter
 from lifters.mono_lifter import MonoLifter
 from lifters.robust_pose_lifter import RobustPoseLifter
+from utils.geometry import get_xtheta_from_theta
+
 from _test.test_tools import all_lifters
 
+NOISE = 1e-2
+
+
 def test_hess_finite_diff():
     for lifter in all_lifters():
         lifter.generate_random_setup()
@@ -15,7 +19,7 @@ def test_hess_finite_diff():
         errors = []
         eps_list = np.logspace(-10, -5, 5)
         for eps in eps_list:
-            Q, y = lifter.get_Q(noise=1e-2)
+            Q, y = lifter.get_Q(noise=NOISE)
             theta = lifter.get_vec_around_gt(delta=0).flatten("C")
 
             try:
@@ -110,7 +114,7 @@ def test_grad_finite_diff():
 
 
 def test_cost_noisy():
-    test_cost(noise=0.1)
+    test_cost(noise=NOISE)
 
 
 def test_cost(noise=0.0):
@@ -142,7 +146,7 @@ def test_cost(noise=0.0):
         assert abs(cost - np.sum(w < 0)) < 1e-10
 
 
-def test_solvers_noisy(n_seeds=1, noise=1e-1):
+def test_solvers_noisy(n_seeds=1, noise=NOISE):
     test_solvers(n_seeds=n_seeds, noise=noise)
 
 
 def test_solvers(n_seeds=1, noise=0.0):
@@ -170,7 +174,8 @@ def test_solvers(n_seeds=1, noise=0.0):
             if len(theta_hat) == len(theta_gt):
                 np.testing.assert_allclose(theta_hat, theta_gt)
             else:
-                theta_gt = lifter.get_vec_around_gt(delta=0)
+                # theta_gt = lifter.get_vec_around_gt(delta=0)
+                theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
                 np.testing.assert_allclose(theta_hat, theta_gt)
         else:
             # just test that we converged when noise is added
             assert theta_hat is not None
 
@@ -178,7 +183,7 @@ def test_solvers(n_seeds=1, noise=0.0):
         # test that we converge to real solution when initializing around it
-        theta_0 = lifter.get_vec_around_gt(delta=1e-2)
+        theta_0 = lifter.get_vec_around_gt(delta=NOISE)
 
         theta_hat, msg, cost_solver = lifter.local_solver(theta_0, y)
         print("init: ", theta_0)
@@ -194,11 +199,16 @@ def test_solvers(n_seeds=1, noise=0.0):
         cost_lifter = lifter.get_cost(theta_hat, y)
         assert abs(cost_solver - cost_lifter) < 1e-10, (cost_solver, cost_lifter)
 
-        if noise == 0:
-            # test that "we made progress"
+        # test that "we made progress"
+        if len(theta_0) != len(theta_hat):
+            xtheta_0 = get_xtheta_from_theta(theta_0, lifter.d)
+            progress = np.linalg.norm(xtheta_0 - theta_hat)
+        else:
             progress = np.linalg.norm(theta_0 - theta_hat)
-            assert progress > 1e-10, progress
+        assert progress > 1e-10, progress
+
+        if noise == 0:
             # test that cost decreased
             cost_0 = lifter.get_cost(theta_0, y)
             cost_hat = lifter.get_cost(theta_hat, y)
@@ -227,17 +237,10 @@ def test_solvers(n_seeds=1, noise=0.0):
             except NotImplementedError:
                 print("implement Hessian for further checks.")
                 print(e)
-        else:
-            # test that "we made progress"
-            progress = np.linalg.norm(theta_0 - theta_hat)
-            assert progress > 1e-10, progress
-
-            # just test that we converged when noise is added
-            assert theta_hat is not None
 
 
 def compare_solvers():
-    noise = 1e-1
+    noise = NOISE
     for lifter in all_lifters():
         if isinstance(lifter, RobustPoseLifter):
             compare_solvers = [
@@ -278,6 +281,8 @@ def compare_solvers():
             if theta_hat is None:
                 print(solver, "failed")
             else:
+                if len(theta_hat) != len(theta_gt):
+                    theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
                 error = np.linalg.norm(theta_hat - theta_gt)
                 print(
                     f"{solver} finished in {ttot:.4f}s, final cost {cost_solver:.1e}, error {error:.1e}. \n\tmessage:{msg} "
                 )
@@ -285,13 +290,13 @@ def compare_solvers():
 
 if __name__ == "__main__":
-    import sys
     import warnings
 
     # import pytest
     # print("testing")
    # pytest.main([__file__, "-s"])
     # print("all tests passed")
+
     with warnings.catch_warnings():
         # warnings.simplefilter("error")
         test_cost()
@@ -307,4 +312,3 @@ def compare_solvers():
         compare_solvers()
 
     print("all tests passed")
-    # sys.exit()
diff --git a/_test/test_special_cases.py b/_test/test_special_cases.py
index 3165135..98210cf 100644
--- a/_test/test_special_cases.py
+++ b/_test/test_special_cases.py
@@ -2,7 +2,6 @@
 TODO: SLAM is currently not supported and therefore these tests are not being kept up to date.
 """
-import matplotlib.pylab as plt
 import numpy as np
 
 from lifters.range_only_slam1 import RangeOnlySLAM1Lifter
@@ -84,7 +83,7 @@ def old_test_gauge():
 
     try:
         S, V = np.linalg.eig(hess)
-    except:
+    except ValueError:
         S, V = np.linalg.eig(hess.toarray())
     mask = np.abs(S) < 1e-10
     n_null = np.sum(mask)
diff --git a/_test/test_state_lifter.py b/_test/test_state_lifter.py
index b58ecf2..03aa449 100644
--- a/_test/test_state_lifter.py
+++ b/_test/test_state_lifter.py
@@ -1,8 +1,11 @@
-import matplotlib.pylab as plt
 import numpy as np
 
 from _test.test_tools import all_lifters
-from lifters.state_lifter import unravel_multi_index_triu, ravel_multi_index_triu
+from lifters.state_lifter import (
+    unravel_multi_index_triu,
+    ravel_multi_index_triu,
+    StateLifter,
+)
 import pytest
@@ -39,9 +42,9 @@ def test_ravel():
 
 
 def _test_with_tol(lifter, A_list, tol):
-    x = lifter.get_x()
+    x = lifter.get_x().astype(float).reshape((-1, 1))
     for Ai in A_list:
-        err = abs(x.T @ Ai @ x)
+        err = abs((x.T @ Ai @ x)[0, 0])
         assert err < tol, err
 
         ai = lifter.get_vec(Ai.toarray())
@@ -85,9 +88,10 @@ def test_learned_constraints():
 
 
 def test_vec_mat():
     """Make sure that we can go back and forth from vec to mat."""
     for lifter in all_lifters():
+        assert isinstance(lifter, StateLifter)
         try:
             A_known = lifter.get_A_known()
-        except:
+        except AttributeError:
             print(f"could not get A_known of {lifter}")
             A_known = []
@@ -110,15 +114,10 @@ def test_vec_mat():
             a_test = lifter.convert_polyrow_to_a(a_poly)
             np.testing.assert_allclose(a, a_test)
 
-    A_learned = lifter.get_A_learned(A_known=A_known, normalize=False)
-    for A_l, A_k in zip(A_learned[:3], A_known):
-        np.testing.assert_allclose(A_l.toarray(), A_k.toarray())
-
 
 pytest_configure()
 
 if __name__ == "__main__":
-    import sys
     import warnings
 
     test_ravel()
@@ -137,4 +136,3 @@ def test_vec_mat():
         warnings.simplefilter("ignore")
 
     print("all tests passed")
-    # sys.exit()
diff --git a/_test/test_tools.py b/_test/test_tools.py
index f10723b..c658087 100644
--- a/_test/test_tools.py
+++ b/_test/test_tools.py
@@ -4,6 +4,7 @@
 from lifters.range_only_lifters import RangeOnlyLocLifter
 from lifters.range_only_slam1 import RangeOnlySLAM1Lifter
 from lifters.range_only_slam2 import RangeOnlySLAM2Lifter
+from lifters.state_lifter import StateLifter
 from lifters.stereo1d_lifter import Stereo1DLifter
 from lifters.stereo2d_lifter import Stereo2DLifter
 from lifters.stereo3d_lifter import Stereo3DLifter
@@ -14,8 +15,8 @@
 n_landmarks = 3
 n_poses = 4
 Lifters = [
-    # (Poly4Lifter, dict()),
-    # (Poly6Lifter, dict()),
+    (Poly4Lifter, dict()),
+    (Poly6Lifter, dict()),
     (WahbaLifter, dict(n_landmarks=3, d=2, robust=False, level="no", n_outliers=0)),
     (MonoLifter, dict(n_landmarks=5, d=2, robust=False, level="no", n_outliers=0)),
    (WahbaLifter, dict(n_landmarks=5, d=2, robust=True, level="xwT", n_outliers=1)),
@@ -36,7 +37,7 @@
 # Below, we always reset seeds to make sure tests are reproducible.
 
-def all_lifters():
+def all_lifters() -> StateLifter:
     for Lifter, kwargs in Lifters:
         np.random.seed(1)
         yield Lifter(**kwargs)
diff --git a/certifiable-tools b/certifiable-tools
index 9506e70..d83c62d 160000
--- a/certifiable-tools
+++ b/certifiable-tools
@@ -1 +1 @@
-Subproject commit 9506e7017e307af45cd198f957a05534c2136fb9
+Subproject commit d83c62dfd2b92f7a6dbab90b3e776974fb0be56c
diff --git a/lifters/poly_lifters.py b/lifters/poly_lifters.py
index 34aaeac..98a407a 100644
--- a/lifters/poly_lifters.py
+++ b/lifters/poly_lifters.py
@@ -41,26 +41,29 @@ def get_Q_mat(self):
     def get_error(self, t):
         return {"MAE": float(abs(self.theta - t))}
 
-    def get_x(self, t=None, parameters=None, var_subset=None):
-        if t is None:
-            t = self.theta
-        return np.array([t**i for i in range(self.degree // 2 + 1)])
+    def get_x(self, theta=None, parameters=None, var_subset=None):
+        if theta is None:
+            theta = self.theta
+        return np.array([theta**i for i in range(self.degree // 2 + 1)])
 
     def get_Q(self, noise=1e-3):
         Q = self.get_Q_mat()
         return Q, None
 
-    def get_cost(self, t, *args, **kwargs):
+    def get_cost(self, theta, *args, **kwargs):
         Q = self.get_Q_mat()
-        x = self.get_x(t)
+        x = self.get_x(theta)
         return x.T @ Q @ x
 
+    def get_hess(self, *args, **kwargs):
+        raise NotImplementedError
+
     def local_solver(self, t0, *args, **kwargs):
         from scipy.optimize import minimize
 
         sol = minimize(self.get_cost, t0)
         info = {"success": sol.success}
-        return sol.x[0], info, sol.fun
+        return sol.x, info, sol.fun
 
     def __repr__(self):
         return f"poly{self.degree}"
diff --git a/lifters/robust_pose_lifter.py b/lifters/robust_pose_lifter.py
index 361b6d8..c439282 100644
--- a/lifters/robust_pose_lifter.py
+++ b/lifters/robust_pose_lifter.py
@@ -1,9 +1,6 @@
 from abc import abstractmethod, ABC
 from copy import deepcopy
 
-import matplotlib
-import matplotlib.pylab as plt
-
 import autograd.numpy as np
 
 from lifters.state_lifter import StateLifter
@@ -267,7 +264,7 @@ def get_cost(self, theta, y):
 
         cost = 0
         for i in range(self.n_landmarks):
-            res = self.residual(R, t, self.landmarks[i], y[i])
+            res = self.residual_sq(R, t, self.landmarks[i], y[i])
             if self.robust:
                 cost += (1 + w[i]) / self.beta**2 * res + 1 - w[i]
             else:
@@ -318,7 +315,7 @@ def cost(R, t):
             return 0.5 * cost + self.penalty(t)
 
         @pymanopt.function.autograd(manifold)
-        def euclidean_gradient(R, t):
+        def euclidean_gradient_unused(R, t):
            grad_R = np.zeros(R.shape)
             grad_t = np.zeros(t.shape)
             for i in range(self.n_landmarks):
@@ -338,9 +335,9 @@ def euclidean_gradient_unused(R, t):
                 grad_t += Wi.T @ term
             return grad_R, grad_t
 
-        euclidean_gradient = None  # set to None
+        euclidean_gradient = None
         problem = pymanopt.Problem(
-            manifold, cost, euclidean_gradient=euclidean_gradient  #
+            manifold, cost, euclidean_gradient=euclidean_gradient
         )
 
         optimizer = Optimizer(**solver_kwargs)
diff --git a/lifters/state_lifter.py b/lifters/state_lifter.py
index 4c7bdb9..d537dd6 100644
--- a/lifters/state_lifter.py
+++ b/lifters/state_lifter.py
@@ -170,9 +170,6 @@ def theta(self, value):
 
     @property
     def base_var_dict(self):
-        var_dict = {}
-        # var_dict.update({f"c_{i}": self.d for i in range(self.d)})
-        # var_dict.update({"t": self.d})
         var_dict = {f"x": self.d**2 + self.d}
         return var_dict
 
@@ -308,6 +305,12 @@ def get_mat(self, vec, sparse=False, var_dict=None, correct=True):
         all_var_dict = {key[2]: 1 for key in augment_var_dict.values()}
         return Ai_poly.get_matrix(all_var_dict)
 
+    def get_A_learned(self, A_known=[], var_dict=None, method=METHOD) -> list:
+        Y = self.generate_Y(var_subset=var_dict)
+        basis, S = self.get_basis(Y, A_known=A_known, method=method)
+        A_learned = self.generate_matrices(basis)
+        return A_learned
+
     def get_A_known(self, var_dict=None) -> list:
         return []
 
@@ -589,16 +592,6 @@ def convert_b_to_polyrow(self, b, var_subset, tol=1e-10) -> PolyMatrix:
                 poly_row["h", key] = val
         return poly_row
 
-    def zero_pad_subvector(self, b, var_subset, target_subset=None):
-        b_row = self.convert_b_to_polyrow(b, var_subset)
-        target_row_dict = self.var_dict_row(var_subset=target_subset)
-
-        # find out if the relevant variables of b are a subset of target_subset.
-        if set(b_row.variable_dict_j).issubset(target_row_dict):
-            return self.zero_pad_subvector_old(b, var_subset, target_subset)
-        else:
-            return None
-
     def apply_templates(self, basis_list, n_landmarks=None, verbose=False):
         """
         Apply the learned patterns in basis_list to all landmarks.
diff --git a/lifters/stereo1d_lifter.py b/lifters/stereo1d_lifter.py
index 84a1f10..a337418 100644
--- a/lifters/stereo1d_lifter.py
+++ b/lifters/stereo1d_lifter.py
@@ -68,13 +68,13 @@ def get_x(self, theta=None, parameters=None, var_subset=None):
             if key == "h":
                 x_data.append(1.0)
             elif key == "x":
-                x_data.append(float(theta))
+                x_data.append(float(theta[0]))
             elif "z" in key:
                 idx = int(key.split("_")[-1])
                 if self.param_level == "p":
-                    x_data.append(float(1 / (theta - parameters[idx + 1])))
+                    x_data.append(float(1 / (theta[0] - parameters[idx + 1])))
                 elif self.param_level == "no":
-                    x_data.append(float(1 / (theta - self.landmarks[idx])))
+                    x_data.append(float(1 / (theta[0] - self.landmarks[idx])))
             else:
                 raise ValueError("unknown key in get_x", key)
         return np.array(x_data)
diff --git a/lifters/stereo2d_problem.py b/lifters/stereo2d_problem.py
index 9fbfcbe..b929030 100644
--- a/lifters/stereo2d_problem.py
+++ b/lifters/stereo2d_problem.py
@@ -29,14 +29,23 @@ def _T(phi):
         phi_flat = phi[:, 0]
     else:
         phi_flat = phi
-    x, y, theta = phi_flat
-    return np.array(
-        [
-            [np.cos(theta), -np.sin(theta), x],
-            [np.sin(theta), np.cos(theta), y],
-            [0, 0, 1],
-        ]
-    )
+    try:
+        x, y, theta = phi_flat
+        return np.array(
+            [
+                [np.cos(theta), -np.sin(theta), x],
+                [np.sin(theta), np.cos(theta), y],
+                [0, 0, 1],
+            ]
+        )
+    except ValueError:
+        x, y, *c = phi_flat
+        return np.vstack(
+            [
+                np.hstack([np.array(c).reshape((2, 2)).T, np.array([x, y])[:, None]]),
+                np.array([0, 0, 1]),
+            ]
+        )
 
 
 def generate_problem(N, sigma, M):
diff --git a/poly_matrix b/poly_matrix
index 14ee250..7910a28 160000
--- a/poly_matrix
+++ b/poly_matrix
@@ -1 +1 @@
-Subproject commit 14ee25087e9d46265985825ad5d028c67f3b9491
+Subproject commit 7910a28d60c7beeb6dabc0c236aa1f4898f107a3
diff --git a/utils/constraint.py b/utils/constraint.py
index 41c8a82..9194ebe 100644
--- a/utils/constraint.py
+++ b/utils/constraint.py
@@ -118,9 +118,9 @@ def init_from_A_poly(
 
     @staticmethod
     def init_from_polyrow_b(
-        index: int,
         polyrow_b: PolyMatrix,
         lifter: StateLifter,
+        index: int = 0,
         known: bool = False,
         template_idx: int = None,
     ):
diff --git a/utils/geometry.py b/utils/geometry.py
index e80bb3e..acea108 100644
--- a/utils/geometry.py
+++ b/utils/geometry.py
@@ -92,6 +92,7 @@ def get_xtheta_from_T(T):
     r = T[:-1, -1]
     return np.r_[r, C.flatten("F")]
 
+
 def get_pose_errors_from_xtheta(xtheta_hat, xtheta_gt, d):
     C_hat, r_hat = get_C_r_from_xtheta(xtheta_hat, d)
     C_gt, r_gt = get_C_r_from_xtheta(xtheta_gt, d)
@@ -101,4 +102,4 @@ def get_pose_errors_from_xtheta(xtheta_hat, xtheta_gt, d):
         "r error": r_error,
         "C error": C_error,
         "total error": r_error + C_error,
-    }
\ No newline at end of file
+    }

From fc944b052663afb89f3adca254988a9ac6f4a60d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Frederike=20D=C3=BCmbgen?=
Date: Fri, 29 Dec 2023 14:45:47 -0500
Subject: [PATCH 3/9] Create python-package-conda.yml

---
 .github/workflows/python-package-conda.yml | 44 ++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 .github/workflows/python-package-conda.yml

diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
new file mode 100644
index 0000000..29b0727
--- /dev/null
+++ b/.github/workflows/python-package-conda.yml
@@ -0,0 +1,44 @@
+# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: Python application
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    branches: [ "master" ]
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v3
+      with:
+        python-version: "3.10"
+    - name: Add conda to system path
+      run: |
+        # $CONDA is an environment variable pointing to the root of the miniconda directory
+        echo $CONDA/bin >> $GITHUB_PATH
+    - name: Install dependencies
+      run: |
+        conda env update --file environment.yml --name base
+    - name: Lint with flake8
+      run: |
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Test with pytest
+      run: |
+        conda install pytest
+        pytest

From 7834ffb01534a55fdd2fb53dcfdea727b1e72e60 Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 14:57:20 -0500
Subject: [PATCH 4/9] Add workflow

---
 .github/workflows/python-package-conda.yml | 44 ++++++++++++++++++++++
 .gitignore                                 |  2 +
 poly_matrix                                |  2 +-
 starloc                                    |  2 +-
 4 files changed, 48 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/python-package-conda.yml

diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
new file mode 100644
index 0000000..29b0727
--- /dev/null
+++ b/.github/workflows/python-package-conda.yml
@@ -0,0 +1,44 @@
+# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: Python application
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    branches: [ "master" ]
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v3
+      with:
+        python-version: "3.10"
+    - name: Add conda to system path
+      run: |
+        # $CONDA is an environment variable pointing to the root of the miniconda directory
+        echo $CONDA/bin >> $GITHUB_PATH
+    - name: Install dependencies
+      run: |
+        conda env update --file environment.yml --name base
+    - name: Lint with flake8
+      run: |
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Test with pytest
+      run: |
+        conda install pytest
+        pytest
diff --git a/.gitignore b/.gitignore
index e5fa06c..f11811c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,5 @@ venv/
 *.code-workspace
 solve_mosek.ptf
 solve_cvxpy*.ptf
+build/
+dist/
diff --git a/poly_matrix b/poly_matrix
index 7910a28..14ee250 160000
--- a/poly_matrix
+++ b/poly_matrix
@@ -1 +1 @@
-Subproject commit 7910a28d60c7beeb6dabc0c236aa1f4898f107a3
+Subproject commit 14ee25087e9d46265985825ad5d028c67f3b9491
diff --git a/starloc b/starloc
index c555a79..d3ad541 160000
--- a/starloc
+++ b/starloc
@@ -1 +1 @@
-Subproject commit c555a79b7ff2afa7fa14149b9003e0796f36f0af
+Subproject commit d3ad541c9c9d9c7f7cbfa5064a13fc81aebeb9e2

From be1367c8e5f6ee369445c8c9efb9a0a3a1c176f9 Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 14:58:49 -0500
Subject: [PATCH 5/9] Update submodule

---
 .gitmodules | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitmodules b/.gitmodules
index 37a7936..7dc668f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -3,7 +3,7 @@
 	url = git@github.com:utiasASRL/poly_matrix
 [submodule "starloc"]
 	path = starloc
-	url = git@github.com:duembgen/starloc
+	url = git@github.com:utiasASRL/starloc
 [submodule "certifiable-tools"]
 	path = certifiable-tools
 	url = git@github.com:utiasASRL/certifiable-tools

From d6cb104311a43f9e05082f9d963202704a1e562c Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 15:04:21 -0500
Subject: [PATCH 6/9] Try adding private token

---
 .github/workflows/python-package-conda.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index 29b0727..38bc3f5 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -20,7 +20,8 @@ jobs:
     steps:
     - uses: actions/checkout@v3
       with:
-        submodules: true
+        token: ${{ secrets.CONSTRAINT_LEARNING_PAT }}
+        submodules: recursive
     - name: Set up Python 3.10
       uses: actions/setup-python@v3
       with:

From b23fcc7ffa050091c9d974393ee5e0d32259cbaa Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 15:10:13 -0500
Subject: [PATCH 7/9] Add flake8

---
 environment.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/environment.yml b/environment.yml
index 5eb1318..ea620df 100644
--- a/environment.yml
+++ b/environment.yml
@@ -8,6 +8,7 @@ dependencies:
   - python=3.10
   - pip=22.3
+  - flake8
   - numpy>=1.23.5
   - scipy>=1.9.3

From 0a9392f8853b62b60358a2b869c11ba6fe4583f3 Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 15:17:51 -0500
Subject: [PATCH 8/9] Fix linting errors

---
 lifters/robust_pose_lifter.py   |   3 +-
 lifters/stereo1d_slam_lifter.py | 205 --------------------------------
 utils/plotting_tools.py         |  18 ---
 3 files changed, 1 insertion(+), 225 deletions(-)
 delete mode 100644 lifters/stereo1d_slam_lifter.py

diff --git a/lifters/robust_pose_lifter.py b/lifters/robust_pose_lifter.py
index c439282..99c02bb 100644
--- a/lifters/robust_pose_lifter.py
+++ b/lifters/robust_pose_lifter.py
@@ -95,8 +95,7 @@ def penalty(self, t, rho=PENALTY_RHO, u=PENALTY_U):
                 [rho * u * np.log10(1 + np.exp(hi / u)) for hi in self.h_list(t)]
             )
         except RuntimeWarning:
-            PENALTY_U *= 0.1
-            u = PENALTY_U
+            u *= 0.1
             return np.sum(
                 [rho * u * np.log10(1 + np.exp(hi / u)) for hi in self.h_list(t)]
             )
diff --git a/lifters/stereo1d_slam_lifter.py b/lifters/stereo1d_slam_lifter.py
deleted file mode 100644
index fb92cf1..0000000
--- a/lifters/stereo1d_slam_lifter.py
+++ /dev/null
@@ -1,205 +0,0 @@
-import numpy as np
-
-from lifters.state_lifter import StateLifter
-
-
-class Stereo1DSLAMLifter(StateLifter):
-    LEVELS = ["no", "za", "all"]
-
-    def __init__(self, n_landmarks, level="no"):
-        self.n_landmarks = n_landmarks
-        self.d = 1
-        self.W = 1.0
-        self.theta_ = None
-        self.level = level
-        if self.level == "no":
-            L = 0
-        elif self.level == "za":
-            L = self.n_landmarks**2
-        elif self.level == "all":
-            # z*a + t*a + t*z + y_i_j * ak
-            # y_i_j ut_i vt_i + w_i_j_k
-            L = (
-                self.n_landmarks**2
-                + 2 * self.n_landmarks
-                + self.n_landmarks**2 * self.n_landmarks
-            )
-        super().__init__(theta_shape=(1 + n_landmarks,), M=n_landmarks, L=L)
-
-    @property
-    def theta(self):
-        if self.theta_ is None:
-            self.theta_ = self.sample_theta()
-        return self.theta_
-
-    def generate_random_setup(self):
-        pass
-
-    def sample_theta(self):
-        # for SLAM, we also want to regenerate the landmarks!
-        # self.generate_random_setup()
-        theta = np.random.rand(1)
-        landmarks = np.random.rand(self.n_landmarks)
-        counter = 0
-        while np.min(np.abs(theta - landmarks)) <= 1e-3:
-            theta = np.random.rand(1)
-            landmarks = np.random.rand(self.n_landmarks)
-            counter += 1
-            if counter >= 1000:
-                print("Warning: couldn't find valid setup")
-                return
-        return np.r_[theta, landmarks]
-
-    def get_x(self, theta=None, var_subset=None):
-        """
-        :param var_subset: list of variables to include in x vector. Set to None for all.
-        """
-        if theta is None:
-            theta = self.theta
-        if var_subset is None:
-            var_subset = self.var_dict.keys()
-
-        x, *landmarks = theta
-
-        x_data = []
-        for key in var_subset:
-            if key == "h":
-                x_data.append(1.0)
-            elif key == "x":
-                x_data.append(float(x))
-            elif "a" in key:
-                idx = int(key.split("_")[-1])
-                x_data.append(float(landmarks[idx]))
-            elif "z" in key:
-                idx = int(key.split("_")[-1])
-                zi = float(1 / (x - landmarks[idx]))
-                x_data.append(zi)
-            elif "y" in key:
-                i, j = list(map(int, key.split("_")[-2:]))
-                zi = float(1 / (x - landmarks[i]))
-                aj = landmarks[j]
-                x_data.append(zi * aj)
-            elif "u" in key:
-                idx = int(key.split("_")[-1])
-                x_data.append(float(x * landmarks[idx]))
-            elif "v" in key:
-                idx = int(key.split("_")[-1])
-                zi = float(1 / (x - landmarks[idx]))
-                x_data.append(float(x * zi))
-            elif "w" in key:
-                i, j, k = list(map(int, key.split("_")[-3:]))
-                zi = float(1 / (x - landmarks[i]))
-                aj = landmarks[j]
-                ak = landmarks[k]
-                x_data.append(zi * aj * ak)
-            else:
-                raise ValueError("unknown key in get_x", key)
-        return np.array(x_data)
-
-    @property
-    def var_dict(self):
-        import itertools
-
-        vars = ["h", "x"]
-        vars += [f"a_{j}" for j in range(self.n_landmarks)]
-        vars += [f"z_{j}" for j in range(self.n_landmarks)]
-        if self.level == "za":
-            vars += [
-                f"y_{i}_{j}"
-                for i, j in itertools.product(
-                    range(self.n_landmarks), range(self.n_landmarks)
-                )
-            ]
-        elif self.level == "all":
-            vars += [
-                f"y_{i}_{j}"
-                for i, j in itertools.product(
-                    range(self.n_landmarks), range(self.n_landmarks)
-                )
-            ]
-            vars += [f"u_{i}" for i in range(self.n_landmarks)]
-            vars += [f"v_{i}" for i in range(self.n_landmarks)]
-            vars += [
-                f"w_{i}_{j}_{k}"
-                for i, j, k in itertools.product(
-                    range(self.n_landmarks),
-                    range(self.n_landmarks),
-                    range(self.n_landmarks),
-                )
-            ]
-        return {v: 1 for v in vars}
-
-    def get_grad(self, t, y):
-        raise NotImplementedError("get_grad not implement yet")
-
-    def get_Q(self, noise: float = 1e-3) -> tuple:
-        from poly_matrix.least_squares_problem import LeastSquaresProblem
-
-        x, *landmarks = self.theta
-        y = 1 / (x - landmarks) + np.random.normal(
-            scale=noise, loc=0, size=self.n_landmarks
-        )
-        ls_problem = LeastSquaresProblem()
-        for j in range(len(y)):
-            ls_problem.add_residual({"h": -y[j], f"z_{j}": 1})
-        return ls_problem.get_Q().get_matrix(self.var_dict), y
-
-    def get_A_known(self, add_known_redundant=False):
-        from poly_matrix.poly_matrix import PolyMatrix
-
-        A_known = []
-
-        # enforce that z_j = 1/(x - a_j) <=> 1 - z_j*x + a_j*z_j = 0
-        for j in range(self.n_landmarks):
-            A = PolyMatrix()
-            A[f"a_{j}", f"z_{j}"] = 0.5
-            A["x", f"z_{j}"] = -0.5
-            A["h", "h"] = 1.0
-            A_known.append(A.get_matrix(variables=self.var_dict))
-
-        if True:  # not add_known_redundant:
-            return A_known
-
-        # TODO(FD) below constraint is not quadratic anymore!
-        # add known redundant constraints:
-        # enforce that z_j - z_i = (a_j - a_i) * z_j * z_i
-        for i in range(self.n_landmarks):
-            for j in range(i + 1, self.n_landmarks):
-                A = PolyMatrix()
-                A["h", f"z_{j}"] = 1
-                A["h", f"z_{i}"] = -1
-                A[f"z_{i}", f"z_{j}"] = self.landmarks[i] - landmarks[j]
-                A_known.append(A.get_matrix(variables=self.var_dict))
-        return A_known
-
-    def get_cost(self, x, y):
-        t, *landmarks = x
-        W = self.W
-        return np.sum((y - (1 / (t - landmarks))) ** 2)
-
-    def local_solver(
-        self, t_init, y, num_iters=100, eps=1e-5, W=None, verbose=False, **kwargs
-    ):
-        x_op, *landmarks = t_init
-        for i in range(num_iters):
-            u = y - (1 / (x_op - landmarks))
-            if verbose:
-                print(f"cost {i}", np.sum(u**2))
-            du = 1 / ((x_op - landmarks) ** 2)
-            if np.linalg.norm(du) > 1e-10:
-                dx = -np.sum(u * du) / np.sum(du * du)
-                x_op = x_op + dx
-                t_op = np.r_[x_op, landmarks]
-                if np.abs(dx) < eps:
-                    msg = f"converged in dx after {i} it"
-                    return t_op, msg, self.get_cost(t_op, y)
-            else:
-                msg = f"converged in du after {i} it"
-                return t_op, msg, self.get_cost(t_op, y)
-        return None, "didn't converge", None
-
-    def __repr__(self):
-        return "stereo1d_slam"
diff --git a/utils/plotting_tools.py b/utils/plotting_tools.py
index dab24f8..7124223 100644
--- a/utils/plotting_tools.py
+++ b/utils/plotting_tools.py
@@ -332,25 +332,7 @@ def plot_matrices(df_tight, fname_root):
             fname = fname_root + f"_{order_name}_{order_type}.png"
             savefig(fig, fname)
 
-    return
-    matrix_agg = {
-        "A": A_agg.astype(int),
-        "H": H,
-        "Q": Q.toarray(),
-    }
-    for matrix_type, matrix in matrix_agg.items():
-        if matrix is None:
-            continue
-        fname = fname_root + f"_{order_name}_{order_type}_{matrix_type}.png"
-        fig, ax = plt.subplots()
-        plot_matrix(
-            ax=ax,
-            Ai=matrix,
-            colorbar=True,
-            title=matrix_type,
-        )
-        savefig(fig, fname)
 
 
 def plot_singular_values(S, eps=None, label="singular values", ax=None):

From f50df044055aa823b9ee3fdf8f67a825df9f27d4 Mon Sep 17 00:00:00 2001
From: Frederike Duembgen
Date: Fri, 29 Dec 2023 15:23:03 -0500
Subject: [PATCH 9/9] Unify requirements

---
 environment.yml  | 1 +
 requirements.txt | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/environment.yml b/environment.yml
index ea620df..49263e1 100644
--- a/environment.yml
+++ b/environment.yml
@@ -19,6 +19,7 @@ dependencies:
   - pytest>=7.2.2
   - jupyter>=1.0.0
   - black>=23.1.0
+  - autograd>=1.2.0
   - mosek
   - tbb=2020.2 # sparseqr conflict
   - autograd>=1.6.2
diff --git a/requirements.txt b/requirements.txt
index 8fb1361..855eba5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,5 +7,11 @@ progressbar2>=4.2.0
 pytest>=7.2.2
 jupyter>=1.0.0
 black>=23.1.0
-autograd>=1.25.0
+autograd>=1.6.2
+mosek
 pymanopt>=2.1.1
+sparseqr
+asrl-pylgmath>=1.0.3
+-e poly_matrix/
+-e certifiable-tools/
+-e .
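
Note on the API change in PATCH 2/9: StateLifter.get_A_learned now returns the list of learned
constraint matrices directly, replacing the old (A_learned, basis_poly) return and its
normalize flag. The sketch below mirrors the updated test_learned_constraints in
_test/test_parameters.py; the specific lifter and its parameters are illustrative
assumptions, not something the patches prescribe.

    # Usage sketch (assumption: run from the repo root, inside the conda env above).
    import numpy as np

    from lifters.stereo2d_lifter import Stereo2DLifter

    # Illustrative choice of lifter; any StateLifter subclass exposes the same interface.
    lifter = Stereo2DLifter(n_landmarks=2, param_level="ppT", level="urT")

    # New in PATCH 2/9: get_A_learned() returns the constraint matrices directly.
    A_learned = lifter.get_A_learned()

    # Each learned A_i should satisfy x^T A_i x = 0 for the lifted ground-truth vector x.
    np.random.seed(0)
    lifter.test_constraints(A_learned, errors="raise")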