Commit 3926af5

Fix tests

duembgen committed Jan 19, 2024
1 parent 4213f87 · commit 3926af5
Showing 23 changed files with 154 additions and 107 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/python-package-conda.yml
@@ -21,7 +21,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           token: ${{ secrets.CONSTRAINT_LEARNING_PAT }}
-          submodules: recursive
+          submodules: true
       - name: Set up Python 3.10
         uses: actions/setup-python@v3
         with:
@@ -42,4 +42,4 @@ jobs:
       - name: Test with pytest
         run: |
           conda install pytest
-          pytest
+          pytest _test/
1 change: 1 addition & 0 deletions .gitignore
@@ -1,5 +1,6 @@
 _results/
 _results_server/
+_results_laptop/
 _plots/
 starrynight/
 log/
4 changes: 3 additions & 1 deletion _scripts/get_server_results.sh
@@ -1,7 +1,9 @@
 #!/usr/bin/bash

 # arxiv mode, verbose, compress.
-rsync -avz -e 'ssh' [email protected]:/home/fdu/constraint_learning/_results/* _results_server/ --exclude-from='_scripts/exclude-server.txt' --exclude="*.pdf"
+#rsync -avz -e 'ssh' [email protected]:/home/fdu/constraint_learning/_results/* _results_server/ --exclude-from='_scripts/exclude-server.txt' --exclude="*.pdf"
+
+rsync -avz -e 'ssh' [email protected]:/home/asrl/research/constraint_learning/_results/* _results_laptop/ --exclude-from='_scripts/exclude-server.txt' --exclude="*.pdf"

 # not needed anymore: copy starrynight dataset over to server
 # rsync -avz -e 'ssh' ./starrynight [email protected]:/home/fdu/constraint_learning/
14 changes: 7 additions & 7 deletions _scripts/run_all_study.py
@@ -50,14 +50,14 @@ def generate_results(lifters, seed=0):

 def run_all(recompute=RECOMPUTE):
     lifters = [
-        # (RangeOnlyLocLifter, dict(n_positions=3, n_landmarks=10, d=3, level="no")),
-        # (RangeOnlyLocLifter, dict(n_positions=3, n_landmarks=10, d=3, level="quad")),
+        (RangeOnlyLocLifter, dict(n_positions=3, n_landmarks=10, d=3, level="no")),
+        (RangeOnlyLocLifter, dict(n_positions=3, n_landmarks=10, d=3, level="quad")),
         (Stereo2DLifter, dict(n_landmarks=3, param_level="ppT", level="urT")),
-        # (Stereo3DLifter, dict(n_landmarks=4, param_level="ppT", level="urT")),
-        # (WahbaLifter, dict(n_landmarks=5, d=3, robust=True, level="xwT", n_outliers=1)),
-        # (MonoLifter, dict(n_landmarks=6, d=3, robust=True, level="xwT", n_outliers=1)),
-        # (WahbaLifter, dict(n_landmarks=4, d=3, robust=False, level="no", n_outliers=0)),
-        # (MonoLifter, dict(n_landmarks=5, d=3, robust=False, level="no", n_outliers=0)),
+        (Stereo3DLifter, dict(n_landmarks=4, param_level="ppT", level="urT")),
+        (WahbaLifter, dict(n_landmarks=5, d=3, robust=True, level="xwT", n_outliers=1)),
+        (MonoLifter, dict(n_landmarks=6, d=3, robust=True, level="xwT", n_outliers=1)),
+        (WahbaLifter, dict(n_landmarks=4, d=3, robust=False, level="no", n_outliers=0)),
+        (MonoLifter, dict(n_landmarks=5, d=3, robust=False, level="no", n_outliers=0)),
     ]

     fname = f"{RESULTS_DIR}/all_df_new.pkl"
2 changes: 1 addition & 1 deletion _scripts/run_datasets_ro.py
@@ -5,7 +5,7 @@

 try:
     matplotlib.use("TkAgg")  # non-interactive
-except Exception as e:
+except:
     pass

 import pandas as pd
26 changes: 13 additions & 13 deletions _scripts/run_other_study.py
@@ -112,18 +112,18 @@ def run_wahba(n_seeds, recompute, tightness=True, scalability=True):

     print("================= Wahba study ==================")

-    # if tightness:
-    #     lifter_tightness(WahbaLifter, d=d, n_landmarks=4, robust=False)
+    if tightness:
+        lifter_tightness(WahbaLifter, d=d, n_landmarks=4, robust=False)
     if scalability:
-        # lifter_scalability_new(
-        #     WahbaLifter,
-        #     d=d,
-        #     n_landmarks=4,
-        #     robust=False,
-        #     n_outliers=0,
-        #     n_seeds=n_seeds,
-        #     recompute=recompute,
-        # )
+        lifter_scalability_new(
+            WahbaLifter,
+            d=d,
+            n_landmarks=4,
+            robust=False,
+            n_outliers=0,
+            n_seeds=n_seeds,
+            recompute=recompute,
+        )
         lifter_scalability_new(
             WahbaLifter,
             d=d,
@@ -172,6 +172,6 @@ def run_all(n_seeds, recompute, tightness=True, scalability=True):


 if __name__ == "__main__":
-    # run_all(n_seeds=1, recompute=True)
+    run_all(n_seeds=1, recompute=True)
     # run_mono(n_seeds=1, recompute=True)
-    run_wahba(n_seeds=1, recompute=True)
+    # run_wahba(n_seeds=1, recompute=True)
8 changes: 4 additions & 4 deletions _scripts/run_stereo_study.py
@@ -96,10 +96,10 @@ def stereo_scalability_new(n_seeds, recompute, d=2):

     learner = Learner(lifter=lifter, variable_list=lifter.variable_list)

-    # if lifter.d == 2:
-    #     fname_root = f"{RESULTS_DIR}/scalability_{learner.lifter}"
-    #     learner = Learner(lifter=lifter, variable_list=lifter.variable_list)
-    #     run_scalability_plot(learner, recompute=recompute, fname_root=fname_root)
+    if lifter.d == 2:
+        fname_root = f"{RESULTS_DIR}/scalability_{learner.lifter}"
+        learner = Learner(lifter=lifter, variable_list=lifter.variable_list)
+        run_scalability_plot(learner, recompute=recompute, fname_root=fname_root)

     df = run_scalability_new(
         learner,
4 changes: 2 additions & 2 deletions _scripts/run_time_study.py
@@ -168,8 +168,8 @@ def generate_results(lifter: MatWeightLocLifter, n_params_list=[10], fname=""):

     n_params_list = np.logspace(1, 6, 6).astype(int)
     # n_params_list = np.logspace(1, 2, 10).astype(int)
-    fname = f"_results/{lifter}_time_dsdp.pkl"
-    overwrite = True
+    fname = f"_results_laptop/{lifter}_time_dsdp.pkl"
+    overwrite = False

     try:
         assert overwrite is False
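Note: the fname/overwrite pair above follows a simple cache-or-recompute pattern — the try/assert shown loads the pickle unless overwrite forces regeneration. A minimal sketch of that pattern, assuming a hypothetical load_or_generate helper and a generate_results callable standing in for the script's own logic (neither name is part of run_time_study.py):

# Sketch only: load_or_generate and the generate_results argument are illustrative.
import pandas as pd


def load_or_generate(fname, overwrite, generate_results):
    try:
        assert overwrite is False  # overwrite=True forces regeneration
        df = pd.read_pickle(fname)  # reuse cached results when the file exists
        print(f"read results from {fname}")
    except (AssertionError, FileNotFoundError):
        df = generate_results()  # recompute ...
        df.to_pickle(fname)  # ... and cache for the next run
        print(f"wrote results to {fname}")
    return df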
74 changes: 53 additions & 21 deletions _test/test_solvers.py
@@ -1,6 +1,7 @@
 import numpy as np

 from _test.tools import all_lifters
+from lifters.matweight_lifter import MatWeightLifter
 from lifters.mono_lifter import MonoLifter
 from lifters.poly_lifters import PolyLifter
 from lifters.robust_pose_lifter import RobustPoseLifter
@@ -18,12 +19,12 @@ def test_hess_finite_diff():
        eps_list = np.logspace(-10, -5, 5)
        for eps in eps_list:
            Q, y = lifter.get_Q(noise=NOISE)
-            theta = lifter.get_vec_around_gt(delta=0).flatten("C")
+            theta = lifter.get_vec_around_gt(delta=0)

            try:
                grad = lifter.get_grad(theta, y)
                hess = lifter.get_hess(theta, y).toarray()
-            except NotImplementedError:
+            except (NotImplementedError, AttributeError):
                print("get_hess not implemented?")
                return

@@ -68,7 +69,7 @@ def test_grad_finite_diff():
        for eps in eps_list:
            Q, y = lifter.get_Q(noise=1)

-            theta = lifter.get_vec_around_gt(delta=0).flatten("C")
+            theta = lifter.get_vec_around_gt(delta=0)
            cost = lifter.get_cost(theta, y)

            try:
@@ -136,7 +137,12 @@ def test_cost(noise=0.0):
        # for Stereo3D problem.
        assert abs(cost - costQ) < 1e-6, (cost, costQ)

-        if noise == 0 and not isinstance(lifter, PolyLifter) and not lifter.robust:
+        if (
+            noise == 0
+            and not isinstance(lifter, PolyLifter)
+            and not lifter.robust
+            and not isinstance(lifter, MatWeightLifter)
+        ):
            assert cost < 1e-10, cost
            assert costQ < 1e-7, costQ
        elif noise == 0 and isinstance(lifter, MonoLifter):
@@ -169,12 +175,19 @@ def test_solvers(n_seeds=1, noise=0.0):
                continue
            if noise == 0:
                # test that solution is ground truth with no noise
-                if len(theta_hat) == len(theta_gt):
-                    np.testing.assert_allclose(theta_hat, theta_gt)
+
+                if type(theta_gt) is dict:
+                    for i in range(lifter.n_poses):
+                        val_hat = theta_hat[f"xT0_{i}"]
+                        val_gt = theta_gt[f"x_{i}"].matrix()
+                        np.testing.assert_allclose(val_hat, val_gt)
                else:
-                    # theta_gt = lifter.get_vec_around_gt(delta=0)
-                    theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
-                    np.testing.assert_allclose(theta_hat, theta_gt)
+                    if len(theta_hat) == len(theta_gt):
+                        np.testing.assert_allclose(theta_hat, theta_gt)
+                    else:
+                        # theta_gt = lifter.get_vec_around_gt(delta=0)
+                        theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
+                        np.testing.assert_allclose(theta_hat, theta_gt)

            else:
                # just test that we converged when noise is added
@@ -195,15 +208,25 @@ def test_solvers(n_seeds=1, noise=0.0):
            print(f"{lifter} converged noise {noise}, seed {j}.")

            cost_lifter = lifter.get_cost(theta_hat, y)
-            assert abs(cost_solver - cost_lifter) < 1e-10, (cost_solver, cost_lifter)
+            if cost_lifter >= 1e-10:
+                assert abs(cost_solver - cost_lifter) / cost_lifter < 1e-5, (
+                    cost_solver,
+                    cost_lifter,
+                )

-            # test that "we made progress"
-            if len(theta_0) != len(theta_hat):
-                xtheta_0 = get_xtheta_from_theta(theta_0, lifter.d)
-                progress = np.linalg.norm(xtheta_0 - theta_hat)
+            # test that we made progress
+            if type(theta_hat) is dict:
+                progress = 0
+                for i in range(lifter.n_poses):
+                    val_hat = theta_hat[f"xT0_{i}"]
+                    val_gt = theta_gt[f"x_{i}"].matrix()
+                    progress += np.linalg.norm(val_hat - val_gt)
            else:
-                progress = np.linalg.norm(theta_0 - theta_hat)
-
+                if len(theta_0) != len(theta_hat):
+                    xtheta_0 = get_xtheta_from_theta(theta_0, lifter.d)
+                    progress = np.linalg.norm(xtheta_0 - theta_hat)
+                else:
+                    progress = np.linalg.norm(theta_0 - theta_hat)
            assert progress > 1e-10, progress

            if noise == 0:
@@ -216,7 +239,18 @@ def test_solvers(n_seeds=1, noise=0.0):
                if lifter.n_outliers > 0:
                    continue
                try:
-                    np.testing.assert_allclose(theta_hat, theta_gt, rtol=1e-3)
+                    if type(theta_gt) is dict:
+                        for i in range(lifter.n_poses):
+                            val_hat = theta_hat[f"xT0_{i}"]
+                            val_gt = theta_gt[f"x_{i}"].matrix()
+                            np.testing.assert_allclose(val_hat, val_gt, rtol=1e-3)
+                    else:
+                        if len(theta_hat) == len(theta_gt):
+                            np.testing.assert_allclose(theta_hat, theta_gt, rtol=1e-3)
+                        else:
+                            # theta_gt = lifter.get_vec_around_gt(delta=0)
+                            theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
+                            np.testing.assert_allclose(theta_hat, theta_gt, rtol=1e-3)
                except AssertionError as e:
                    print(
                        f"Found solution for {lifter} is not ground truth in zero-noise! is the problem well-conditioned?"
@@ -232,7 +266,7 @@ def test_solvers(n_seeds=1, noise=0.0):
                    print(
                        f"minimum eigenvalue at gt: {mineig_hess_gt:.1e} and at estimate: {mineig_hess_hat:.1e}"
                    )
-                except NotImplementedError:
+                except (NotImplementedError, AttributeError):
                    print("implement Hessian for further checks.")
                    print(e)

@@ -279,9 +313,7 @@ def compare_solvers():
        if theta_hat is None:
            print(solver, "failed")
        else:
-            if len(theta_hat) != len(theta_gt):
-                theta_gt = get_xtheta_from_theta(theta_gt, lifter.d)
-            error = np.linalg.norm(theta_hat - theta_gt)
+            error = lifter.get_error(theta_hat)["error"]
            print(
                f"{solver} finished in {ttot:.4f}s, final cost {cost_solver:.1e}, error {error:.1e}. \n\tmessage:{msg} "
            )
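Note: the updated cost check in test_solvers above compares solver and lifter costs in relative terms and skips the comparison when the reference cost is numerically zero. A self-contained sketch of that tolerance logic, assuming an illustrative helper name (costs_agree is not part of the test suite, and the zero-reference fallback here replaces the test's simple skip):

# Sketch of the relative-tolerance comparison pattern used above.
def costs_agree(cost_solver, cost_lifter, zero_floor=1e-10, rel_tol=1e-5):
    if cost_lifter < zero_floor:
        # reference cost is numerically zero; fall back to an absolute check
        # (the actual test simply skips the comparison in this case)
        return abs(cost_solver - cost_lifter) < zero_floor
    return abs(cost_solver - cost_lifter) / cost_lifter < rel_tol


assert costs_agree(1.00000001e-2, 1.0e-2)  # tiny relative error passes
assert not costs_agree(1.1e-2, 1.0e-2)  # 10% relative error fails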
13 changes: 4 additions & 9 deletions _test/test_state_lifter.py
@@ -1,13 +1,8 @@
 import numpy as np
-import pytest

 from _test.tools import all_lifters
-from lifters.state_lifter import (
-    unravel_multi_index_triu,
-    ravel_multi_index_triu,
-    StateLifter,
-)
-
+import pytest
+from lifters.state_lifter import ravel_multi_index_triu, unravel_multi_index_triu


 def pytest_configure():
@@ -88,7 +83,6 @@ def test_learned_constraints():
 def test_vec_mat():
     """Make sure that we can go back and forth from vec to mat."""
     for lifter in all_lifters():
-        assert isinstance(lifter, StateLifter)
        try:
            A_known = lifter.get_A_known()
        except AttributeError:
@@ -128,7 +122,8 @@ def test_vec_mat():
    # pytest.main([__file__, "-s"])
    # print("all tests passed")
    with warnings.catch_warnings():
-        warnings.simplefilter("error")
+        warnings.simplefilter("ignore")
+        # warnings.simplefilter("error")
        test_known_constraints()
        test_learned_constraints()

7 changes: 4 additions & 3 deletions _test/tools.py
@@ -1,5 +1,7 @@
 import numpy as np

+from lifters.matweight_lifter import MatWeightLocLifter
+from lifters.mono_lifter import MonoLifter
 from lifters.poly_lifters import Poly4Lifter, Poly6Lifter, PolyLifter
 from lifters.range_only_lifters import RangeOnlyLocLifter
 from lifters.range_only_slam1 import RangeOnlySLAM1Lifter
@@ -8,9 +10,7 @@
 from lifters.stereo1d_lifter import Stereo1DLifter
 from lifters.stereo2d_lifter import Stereo2DLifter
 from lifters.stereo3d_lifter import Stereo3DLifter
-from lifters.mono_lifter import MonoLifter
 from lifters.wahba_lifter import WahbaLifter
-from mwcerts.slam_lifter import MWSlamLifter

 d = 2
 n_landmarks = 3
@@ -21,6 +21,7 @@
     (WahbaLifter, dict(n_landmarks=3, d=2, robust=False, level="no", n_outliers=0)),
     (MonoLifter, dict(n_landmarks=5, d=2, robust=False, level="no", n_outliers=0)),
     (WahbaLifter, dict(n_landmarks=5, d=2, robust=True, level="xwT", n_outliers=1)),
+    (MatWeightLocLifter, dict(n_landmarks=5, n_poses=10)),
     (MonoLifter, dict(n_landmarks=6, d=2, robust=True, level="xwT", n_outliers=1)),
     (
         RangeOnlyLocLifter,
@@ -35,7 +36,7 @@
     (Stereo2DLifter, dict(n_landmarks=n_landmarks)),
     (Stereo3DLifter, dict(n_landmarks=n_landmarks)),
 ]
-Lifters = [(MWSlamLifter, dict(n_landmarks=5, d=3))]
+# Lifters = [(MatWeightLocLifter, dict(n_landmarks=5, n_poses=10))]


 # Below, we always reset seeds to make sure tests are reproducible.
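Note: all_lifters, imported by both test files above, presumably walks this Lifters list; the comment about resetting seeds suggests a generator along the following lines. This is a guess at the helper's shape, not the actual implementation, whose body is truncated out of this diff:

# Hypothetical sketch of all_lifters(); the real body is not shown in this diff.
import numpy as np


def all_lifters(seed=1):
    for Lifter, kwargs in Lifters:
        np.random.seed(seed)  # reset the seed so each lifter sees reproducible data
        yield Lifter(**kwargs)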
2 changes: 1 addition & 1 deletion certifiable-tools
3 changes: 1 addition & 2 deletions environment.yml
@@ -19,16 +19,15 @@ dependencies:
   - pytest>=7.2.2
   - jupyter>=1.0.0
   - black>=23.1.0
-  - autograd>=1.2.0
   - mosek
   - tbb=2020.2 # sparseqr conflict
+  - autograd>=1.6.2

   - pip:
       - chompack>=2.3.1
       #- sparseqr
       - pymanopt>=2.1.1
       - asrl-pylgmath>=1.0.3
       - -e poly_matrix # build local poly_matrix submodule
       - -e certifiable-tools
-      - -e mat_weight_certs
       - -e . # build local lifting package
(The remaining changed files in this commit were not loaded in this view.)