doctest addons (#677)
* doctest for addons

* minor sanity

* add tfa & cleanup

* doctest example

* sanity check

* files for doctest

* remove tensor id

* flake8

* use pytest

* remove tfa_doctest

* pytest for doctest

* remove section in build

* remove unused files

* update conftest; fix the "no option named skip_custom_ops" issue

* flake8

* move np, tfa, tf to conftest

* concise command

* fix docstring issue and add example

* sanity check and missing doctest modules

* remove FloatTensorLike

Co-authored-by: Tzu-Wei Sung <[email protected]>
autoih and WindQAQ authored Aug 4, 2020
1 parent 151e2f7 commit 706e22f
Showing 13 changed files with 158 additions and 155 deletions.
14 changes: 14 additions & 0 deletions tensorflow_addons/conftest.py
@@ -14,6 +14,20 @@
pytest_collection_modifyitems,
)

import numpy as np
import pytest

import tensorflow as tf
import tensorflow_addons as tfa


# fixtures present in this file will be available
# when running tests and can be referenced with strings
# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions


@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = np
doctest_namespace["tf"] = tf
doctest_namespace["tfa"] = tfa
35 changes: 20 additions & 15 deletions tensorflow_addons/layers/wrappers.py
@@ -16,6 +16,7 @@
import logging

import tensorflow as tf

from typeguard import typechecked


@@ -30,21 +31,25 @@ class WeightNormalization(tf.keras.layers.Wrapper):
Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
Tim Salimans, Diederik P. Kingma (2016)
WeightNormalization wrapper works for keras and tf layers.
```python
net = WeightNormalization(
tf.keras.layers.Conv2D(2, 2, activation='relu'),
input_shape=(32, 32, 3),
data_init=True)(x)
net = WeightNormalization(
tf.keras.layers.Conv2D(16, 5, activation='relu'),
data_init=True)(net)
net = WeightNormalization(
tf.keras.layers.Dense(120, activation='relu'),
data_init=True)(net)
net = WeightNormalization(
tf.keras.layers.Dense(n_classes),
data_init=True)(net)
```
Usage:
>>> net = tfa.layers.WeightNormalization(
... tf.keras.layers.Conv2D(2, 2, activation='relu'),
... input_shape=(32, 32, 3),
... data_init=True)(np.random.rand(32, 32, 3, 1).astype('f'))
>>> net = tfa.layers.WeightNormalization(
... tf.keras.layers.Conv2D(16, 2, activation='relu'),
... data_init=True)(net)
>>> net = tfa.layers.WeightNormalization(
... tf.keras.layers.Dense(120, activation='relu'),
... data_init=True)(net)
>>> net = tfa.layers.WeightNormalization(
... tf.keras.layers.Dense(2),
... data_init=True)(net)
>>> net.shape
TensorShape([32, 30, 1, 2])
Arguments:
layer: a layer instance.
data_init: If `True` use data dependent variable initialization
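A minimal sketch (not part of this diff) of the same wrapper used inside a `tf.keras.Sequential` model; the layer sizes and random data below are arbitrary:

```python
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

# Wrap ordinary Keras layers with WeightNormalization inside a Sequential model.
model = tf.keras.Sequential([
    tfa.layers.WeightNormalization(
        tf.keras.layers.Dense(8, activation="relu"), input_shape=(4,)),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(1)),
])
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.rand(16, 4), np.random.rand(16, 1), epochs=1, verbose=0)
```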
15 changes: 6 additions & 9 deletions tensorflow_addons/losses/focal_loss.py
@@ -15,6 +15,7 @@
"""Implements Focal loss."""

import tensorflow as tf

import tensorflow.keras.backend as K

from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
@@ -37,15 +38,11 @@ class SigmoidFocalCrossEntropy(LossFunctionWrapper):
Usage:
```python
fl = tfa.losses.SigmoidFocalCrossEntropy()
loss = fl(
y_true = [[1.0], [1.0], [0.0]],
y_pred = [[0.97], [0.91], [0.03]])
print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,
1.9097870e-04,
2.0559824e-05]
```
>>> fl = tfa.losses.SigmoidFocalCrossEntropy()
>>> loss = fl([[0.97], [0.91], [0.03]], [[1.0], [1.0], [0.0]])
>>> loss
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.00010971, 0.00329749, 0.00030611], dtype=float32)>
Usage with tf.keras API:
```python
14 changes: 7 additions & 7 deletions tensorflow_addons/losses/giou_loss.py
@@ -33,13 +33,13 @@ class GIoULoss(LossFunctionWrapper):
Usage:
```python
gl = tfa.losses.GIoULoss()
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
loss = gl(boxes1, boxes2)
print('Loss: ', loss.numpy()) # Loss: [1.07500000298023224, 1.9333333373069763]
```
>>> gl = tfa.losses.GIoULoss()
>>> boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
>>> boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
>>> loss = gl(boxes1, boxes2)
>>> loss
<tf.Tensor: shape=(), dtype=float32, numpy=1.5041667>
Usage with tf.keras API:
```python
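A note on the numbers (not part of this diff): the scalar in the new doctest equals the mean of the two per-sample values listed in the old comment, consistent with the default averaging loss reduction:

```python
import numpy as np

# Mean of the per-sample GIoU losses from the old example comment.
print(np.mean([1.07500000298023224, 1.9333333373069763]))  # ~1.5041667
```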
12 changes: 4 additions & 8 deletions tensorflow_addons/losses/quantiles.py
@@ -84,15 +84,11 @@ class PinballLoss(LossFunctionWrapper):
See: https://en.wikipedia.org/wiki/Quantile_regression
Usage:
```python
pinball = tfa.losses.PinballLoss(tau=.1)
loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
# loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))
# = (0.9 + 0.9 + 0 + 0.1) / 4
print('Loss: ', loss.numpy()) # Loss: 0.475
```
>>> pinball = tfa.losses.PinballLoss(tau=.1)
>>> loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
>>> loss
<tf.Tensor: shape=(), dtype=float32, numpy=0.475>
Usage with the `compile` API:
43 changes: 24 additions & 19 deletions tensorflow_addons/metrics/cohens_kappa.py
@@ -15,6 +15,7 @@
"""Implements Cohen's Kappa."""

import tensorflow as tf

import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.metrics import Metric
@@ -27,39 +28,43 @@
@tf.keras.utils.register_keras_serializable(package="Addons")
class CohenKappa(Metric):
"""Computes Kappa score between two raters.
The score lies in the range [-1, 1]. A score of -1 represents
complete disagreement between two raters whereas a score of 1
represents complete agreement between the two raters.
A score of 0 means agreement by chance.
Note: As of now, this implementation considers all labels
while calculating the Cohen's Kappa score.
Usage:
```python
actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
m.update_state(actuals, preds)
print('Final result: ', m.result().numpy()) # Result: 0.61904764
# To use this with weights, sample_weight argument can be used.
m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
m.update_state(actuals, preds, sample_weight=weights)
print('Final result: ', m.result().numpy()) # Result: 0.37209308
```
>>> actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
>>> preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
>>> weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
>>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
>>> m.update_state(actuals, preds)
<tf.Tensor: shape=(5, 5), dtype=float32, numpy=
array([[0., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 1., 0.],
[0., 0., 1., 0., 3.]], dtype=float32)>
>>> m.result().numpy()
0.61904764
>>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
>>> m.update_state(actuals, preds, sample_weight=weights)
<tf.Tensor: shape=(5, 5), dtype=float32, numpy=
array([[ 0., 0., 0., 0., 0.],
[ 0., 6., 0., 0., 0.],
[ 0., 0., 0., 0., 10.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 2., 0., 7.]], dtype=float32)>
>>> m.result().numpy()
0.37209308
Usage with tf.keras API:
```python
model = tf.keras.models.Model(inputs, outputs)
model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs))
model.compile('sgd', loss='mse')
```
"""

@typechecked
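A cross-check (not part of this diff): Cohen's kappa recomputed by hand from the unweighted confusion matrix printed above, matching the doctest value `0.61904764`:

```python
import numpy as np

cm = np.array([[0., 0., 0., 0., 0.],
               [0., 2., 0., 0., 0.],
               [0., 0., 0., 0., 1.],
               [0., 0., 0., 1., 0.],
               [0., 0., 1., 0., 3.]])
n = cm.sum()                                           # 8 rated items
p_o = np.trace(cm) / n                                 # observed agreement = 0.75
p_e = (cm.sum(axis=0) * cm.sum(axis=1)).sum() / n**2   # chance agreement = 0.34375
print((p_o - p_e) / (1 - p_e))                         # 0.6190476190476191
```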
19 changes: 7 additions & 12 deletions tensorflow_addons/metrics/matthews_correlation_coefficient.py
@@ -42,18 +42,13 @@ class MatthewsCorrelationCoefficient(tf.keras.metrics.Metric):
((TP + FP) * (TP + FN) * (TN + FP ) * (TN + FN))^(1/2)
Usage:
```python
actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]],
dtype=tf.float32)
preds = tf.constant([[1.0], [0.0], [1.0], [1.0]],
dtype=tf.float32)
# Matthews correlation coefficient
mcc = MatthewsCorrelationCoefficient(num_classes=1)
mcc.update_state(actuals, preds)
print('Matthews correlation coefficient is:',
mcc.result().numpy())
# Matthews correlation coefficient is : -0.33333334
```
>>> actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]], dtype=tf.float32)
>>> preds = tf.constant([[1.0], [0.0], [1.0], [1.0]], dtype=tf.float32)
>>> mcc = tfa.metrics.MatthewsCorrelationCoefficient(num_classes=1)
>>> mcc.update_state(actuals, preds)
>>> mcc.result()
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([-0.33333334], dtype=float32)>
"""

@typechecked
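A cross-check (not part of this diff): the Matthews correlation coefficient for the example above, recomputed from the raw confusion counts, matches the doctest value `-0.33333334`:

```python
import numpy as np

actuals = np.array([1.0, 1.0, 1.0, 0.0])
preds = np.array([1.0, 0.0, 1.0, 1.0])

tp = np.sum((actuals == 1) & (preds == 1))  # 2
tn = np.sum((actuals == 0) & (preds == 0))  # 0
fp = np.sum((actuals == 0) & (preds == 1))  # 1
fn = np.sum((actuals == 1) & (preds == 0))  # 1
mcc = (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(mcc)  # -0.3333333333333333
```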
57 changes: 32 additions & 25 deletions tensorflow_addons/metrics/multilabel_confusion_matrix.py
@@ -16,10 +16,11 @@

import warnings

import numpy as np
import tensorflow as tf

from tensorflow.keras import backend as K
from tensorflow.keras.metrics import Metric
import numpy as np

from typeguard import typechecked
from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike
@@ -46,30 +47,36 @@ class MultiLabelConfusionMatrix(Metric):
- false negatives for class i in M(1,0)
- true positives for class i in M(1,1)
```python
# multilabel confusion matrix
y_true = tf.constant([[1, 0, 1], [0, 1, 0]],
dtype=tf.int32)
y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],
dtype=tf.int32)
output = MultiLabelConfusionMatrix(num_classes=3)
output.update_state(y_true, y_pred)
print('Confusion matrix:', output.result().numpy())
# Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]
[[0 1] [1 0]]]
# if multiclass input is provided
y_true = tf.constant([[1, 0, 0], [0, 1, 0]],
dtype=tf.int32)
y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],
dtype=tf.int32)
output = MultiLabelConfusionMatrix(num_classes=3)
output.update_state(y_true, y_pred)
print('Confusion matrix:', output.result().numpy())
# Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]
```
Usage:
>>> y_true = tf.constant([[1, 0, 1], [0, 1, 0]], dtype=tf.int32)
>>> y_pred = tf.constant([[1, 0, 0],[0, 1, 1]], dtype=tf.int32)
>>> output1 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
>>> output1.update_state(y_true, y_pred)
>>> output1.result()
<tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
array([[[1., 0.],
[0., 1.]],
<BLANKLINE>
[[1., 0.],
[0., 1.]],
<BLANKLINE>
[[0., 1.],
[1., 0.]]], dtype=float32)>
>>> y_true = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.int32)
>>> y_pred = tf.constant([[1, 0, 0],[0, 0, 1]], dtype=tf.int32)
>>> output2 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
>>> output2.update_state(y_true, y_pred)
>>> output2.result()
<tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
array([[[1., 0.],
[0., 1.]],
<BLANKLINE>
[[1., 0.],
[1., 0.]],
<BLANKLINE>
[[1., 1.],
[0., 0.]]], dtype=float32)>
"""

@typechecked
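A cross-check (not part of this diff): the per-class 2x2 matrices from the first doctest above, recomputed with plain NumPy:

```python
import numpy as np

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])

# One 2x2 matrix per class: [[TN, FP], [FN, TP]].
for i in range(y_true.shape[1]):
    t, p = y_true[:, i], y_pred[:, i]
    tn = np.sum((t == 0) & (p == 0))
    fp = np.sum((t == 0) & (p == 1))
    fn = np.sum((t == 1) & (p == 0))
    tp = np.sum((t == 1) & (p == 1))
    print(i, [[int(tn), int(fp)], [int(fn), int(tp)]])
# 0 [[1, 0], [0, 1]]
# 1 [[1, 0], [0, 1]]
# 2 [[0, 1], [1, 0]]
```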
15 changes: 8 additions & 7 deletions tensorflow_addons/metrics/r_square.py
@@ -16,6 +16,7 @@
from typing import Tuple

import tensorflow as tf

from tensorflow.keras import backend as K
from tensorflow.keras.metrics import Metric
from tensorflow.python.ops import weights_broadcast_ops
@@ -61,13 +62,13 @@ class RSquare(Metric):
of the same metric.
Usage:
```python
actuals = tf.constant([1, 4, 3], dtype=tf.float32)
preds = tf.constant([2, 4, 4], dtype=tf.float32)
result = tf.keras.metrics.RSquare()
result.update_state(actuals, preds)
print('R^2 score is: ', r1.result().numpy()) # 0.57142866
```
>>> actuals = tf.constant([1, 4, 3], dtype=tf.float32)
>>> preds = tf.constant([2, 4, 4], dtype=tf.float32)
>>> ans = tfa.metrics.RSquare()
>>> ans.update_state(actuals, preds)
>>> ans.result()
<tf.Tensor: shape=(), dtype=float32, numpy=0.57142854>
"""

@typechecked
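A cross-check (not part of this diff): R^2 for the example above computed directly from its definition, matching the doctest value (4/7, about 0.5714):

```python
import numpy as np

actuals = np.array([1.0, 4.0, 3.0])
preds = np.array([2.0, 4.0, 4.0])

ss_res = np.sum((actuals - preds) ** 2)           # residual sum of squares = 2.0
ss_tot = np.sum((actuals - actuals.mean()) ** 2)  # total sum of squares = 14/3
print(1.0 - ss_res / ss_tot)                      # 0.5714285714285714
```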
6 changes: 2 additions & 4 deletions tensorflow_addons/optimizers/lookahead.py
@@ -34,10 +34,8 @@ class Lookahead(tf.keras.optimizers.Optimizer):
Example of usage:
```python
opt = tf.keras.optimizers.SGD(learning_rate)
opt = tfa.optimizers.Lookahead(opt)
```
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
>>> opt = tfa.optimizers.Lookahead(opt)
"""

@typechecked
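A minimal sketch (not part of this diff) of the wrapped Lookahead optimizer used through the Keras `compile`/`fit` API; the model and random data are arbitrary:

```python
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
opt = tfa.optimizers.Lookahead(tf.keras.optimizers.SGD(learning_rate=0.01))
model.compile(optimizer=opt, loss="mse")
model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)
```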
6 changes: 2 additions & 4 deletions tensorflow_addons/optimizers/moving_average.py
@@ -34,11 +34,9 @@ class MovingAverage(AveragedOptimizerWrapper):
Example of usage:
```python
opt = tf.keras.optimizers.SGD(learning_rate)
opt = tfa.optimizers.MovingAverage(opt)
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
>>> opt = tfa.optimizers.MovingAverage(opt)
```
"""

@typechecked
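A minimal sketch (not part of this diff) of `MovingAverage` in a training loop; the call to `assign_average_vars` is assumed here to be the wrapper's documented way to copy the averaged weights back into the model:

```python
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
opt = tfa.optimizers.MovingAverage(tf.keras.optimizers.SGD(learning_rate=0.01))
model.compile(optimizer=opt, loss="mse")
model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)
opt.assign_average_vars(model.variables)  # overwrite weights with their moving averages
```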