Commit 738720e
removing leftovers from migration
Jordan Stomps committed Oct 31, 2022
1 parent: a7d4bfe · commit: 738720e
Showing 1 changed file with 1 addition and 91 deletions.
tests/test_models.py (92 changes: 1 addition & 91 deletions)
@@ -12,10 +12,6 @@
 import scripts.utils as utils
 # models
 from models.LogReg import LogReg
-from models.SSML.CoTraining import CoTraining
-from models.SSML.LabelProp import LabelProp
-from models.SSML.ShadowNN import ShadowNN
-from models.SSML.ShadowCNN import ShadowCNN
 # testing write
 import joblib
 import os
@@ -166,90 +162,4 @@ def test_LogReg():
     model_file = joblib.load(filename+ext)
     assert model_file.best['params'] == model.best['params']
 
-    os.remove(filename+ext)
-
-
-    # check default parameter settings
-    model = ShadowCNN()
-    assert model.params == {'binning': 1, 'batch_size': 1}
-    assert model.model is not None
-    assert model.eaat is not None
-    assert model.optimizer is not None
-
-    X, Ux, y, Uy = train_test_split(spectra,
-                                    labels,
-                                    test_size=0.5,
-                                    random_state=0)
-    X_train, X_test, y_train, y_test = train_test_split(X,
-                                                        y,
-                                                        test_size=0.2,
-                                                        random_state=0)
-
-    # normalization
-    normalizer = StandardScaler()
-    normalizer.fit(X_train)
-
-    X_train = normalizer.transform(X_train)
-    X_test = normalizer.transform(X_test)
-    Ux = normalizer.transform(Ux)
-
-    params = {'layer1': 2,
-              'kernel': 3,
-              'alpha': 0.1,
-              'xi': 1e-3,
-              'eps': 1.0,
-              'lr': 0.1,
-              'momentum': 0.9,
-              'binning': 20,
-              'batch_size': 4,
-              'drop_rate': 0.1}
-
-    # default behavior
-    model = ShadowCNN(params=params, random_state=0)
-    losscurve, evalcurve = model.train(X_train, y_train, Ux, X_test, y_test)
-
-    # testing train and predict methods
-    pred, acc = model.predict(X_test, y_test)
-
-    # test for agreement between training and testing
-    # (since the same data is used for diagnostics in this test)
-    assert evalcurve[-1] == acc
-
-    # Shadow/PyTorch reports accuracies as percentages
-    # rather than decimals
-    # uninteresting test if Shadow predicts all one class
-    # TODO: make the default params test meaningful
-    assert np.count_nonzero(pred == y_test) > 0
-
-    # testing hyperopt optimize methods
-    space = params
-    space['binning'] = scope.int(hp.quniform('binning',
-                                             10,
-                                             20,
-                                             1))
-    data_dict = {'trainx': X_train,
-                 'testx': X_test,
-                 'trainy': y_train,
-                 'testy': y_test,
-                 'Ux': Ux
-                 }
-    model.optimize(space, data_dict, max_evals=2, verbose=True)
-
-    assert model.best['accuracy'] >= model.worst['accuracy']
-    assert model.best['status'] == 'ok'
-
-    # testing model plotting method
-    filename = 'test_plot'
-    model.plot_training(losscurve=model.best['losscurve'],
-                        evalcurve=model.best['evalcurve'],
-                        filename=filename)
-    os.remove(filename+'.png')
-
-    # testing model write to file method
-    filename = 'test_LogReg'
-    ext = '.joblib'
-    model.save(filename)
-    model_file = joblib.load(filename+ext)
-    assert model_file.best['params'] == model.best['params']
-
-    os.remove(filename+ext)
+    os.remove(filename+ext)
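For context, the removed block exercised hyperopt's search-space API (hp.quniform wrapped in scope.int) through the project's model.optimize helper. A minimal, self-contained sketch of the underlying hyperopt workflow follows; the objective function and its loss are hypothetical stand-ins for illustration, not the project's model:

from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from hyperopt.pyll.base import scope

# integer-valued 'binning' drawn from [10, 20] in steps of 1,
# mirroring the search space in the deleted test
space = {'binning': scope.int(hp.quniform('binning', 10, 20, 1))}

def objective(params):
    # hypothetical stand-in loss; a real objective would train
    # and score a model using these hyperparameters
    loss = abs(params['binning'] - 15) / 15
    return {'loss': loss, 'status': STATUS_OK}

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=2, trials=trials)
print(best)  # e.g. {'binning': 14.0}

In the deleted test, model.optimize presumably wraps this same fmin loop and records the best- and worst-performing trials on the model object, which is what the best/worst accuracy assertions checked.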
