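"""Round-trip save/load test for a TensorFlow Keras model via jackdaw_ml."""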
from functools import lru_cache
from typing import Tuple

import numpy as np
import tensorflow as tf

from jackdaw_ml import loads, saves
from jackdaw_ml.artefact_decorator import artefacts

mnist = tf.keras.datasets.mnist


@artefacts({})
class TFWrapper:
    """Thin wrapper around a small Keras classifier for MNIST digits."""

    model: tf.keras.models.Sequential

    def __init__(self):
        self.model = tf.keras.models.Sequential(
            [
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                tf.keras.layers.Dense(128, activation="relu"),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.Dense(10),
            ]
        )
        self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

    def fit(self, x_train, y_train, epochs: int) -> None:
        self.model.compile(optimizer="adam", loss=self.loss_fn, metrics=["accuracy"])
        # `epochs` must be passed by keyword: the third positional argument
        # to `Model.fit` is `batch_size`, not `epochs`.
        self.model.fit(x_train, y_train, epochs=epochs)


@lru_cache(maxsize=1)
def example_train_data() -> Tuple[np.ndarray, np.ndarray]:
    # Use a 100-sample slice of MNIST, scaled to [0, 1], to keep the test fast.
    (x_train, y_train), _ = mnist.load_data()
    x_train = x_train[0:100] / 255.0
    y_train = y_train[0:100]
    return x_train, y_train


@lru_cache(maxsize=1)
def example_test_data() -> np.ndarray:
    _, (x_test, _) = mnist.load_data()
    return x_test / 255.0


def tf_float_equivalence(a: tf.Tensor, b: tf.Tensor) -> bool:
    # Element-wise comparison within an absolute tolerance of 1e-6.
    return np.all((tf.abs(a - b) < 1e-6).numpy())


def model_equivalent(m1: TFWrapper, m2: TFWrapper) -> bool:
    # Two models are equivalent if they produce the same logits on the test set.
    m1_res = m1.model(example_test_data())
    m2_res = m2.model(example_test_data())
    return tf_float_equivalence(m1_res, m2_res)


def test_basic_wrapper():
    # Train briefly, save the model, restore it into a fresh wrapper, and
    # check that the restored model makes identical predictions.
    m1 = TFWrapper()
    m1.fit(*example_train_data(), epochs=1)
    model_id = saves(m1)
    m2 = TFWrapper()
    loads(m2, model_id)
    assert model_equivalent(m1, m2)
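

# Convenience entry point -- a minimal sketch for running the round trip without
# pytest. It assumes MNIST can be downloaded and that jackdaw_ml's default
# storage backend is configured in this environment.
if __name__ == "__main__":
    test_basic_wrapper()
    print("save/load round-trip OK: restored model matches the original")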