diff --git a/docs/api_docs/python/_redirects.yaml b/docs/api_docs/python/_redirects.yaml
new file mode 100644
index 0000000000..1ee0c7a5a7
--- /dev/null
+++ b/docs/api_docs/python/_redirects.yaml
@@ -0,0 +1,209 @@
+redirects:
+- from: /addons/api_docs/python/tfa/callbacks/time_stopping/TimeStopping
+ to: /addons/api_docs/python/tfa/callbacks/TimeStopping
+- from: /addons/api_docs/python/tfa/callbacks/tqdm_progress_bar/TQDMProgressBar
+ to: /addons/api_docs/python/tfa/callbacks/TQDMProgressBar
+- from: /addons/api_docs/python/tfa/image/distance_transform/euclidean_dist_transform
+ to: /addons/api_docs/python/tfa/image/euclidean_dist_transform
+- from: /addons/api_docs/python/tfa/image/distort_image_ops/adjust_hsv_in_yiq
+ to: /addons/api_docs/python/tfa/image/adjust_hsv_in_yiq
+- from: /addons/api_docs/python/tfa/image/distort_image_ops/random_hsv_in_yiq
+ to: /addons/api_docs/python/tfa/image/random_hsv_in_yiq
+- from: /addons/api_docs/python/tfa/image/filters/mean_filter2d
+ to: /addons/api_docs/python/tfa/image/mean_filter2d
+- from: /addons/api_docs/python/tfa/image/filters/median_filter2d
+ to: /addons/api_docs/python/tfa/image/median_filter2d
+- from: /addons/api_docs/python/tfa/image/resampler_ops/resampler
+ to: /addons/api_docs/python/tfa/image/resampler
+- from: /addons/api_docs/python/tfa/image/transform_ops/rotate
+ to: /addons/api_docs/python/tfa/image/rotate
+- from: /addons/api_docs/python/tfa/image/transform_ops/transform
+ to: /addons/api_docs/python/tfa/image/transform
+- from: /addons/api_docs/python/tfa/image/translate_ops/transform
+ to: /addons/api_docs/python/tfa/image/transform
+- from: /addons/api_docs/python/tfa/image/translate_ops/translate
+ to: /addons/api_docs/python/tfa/image/translate
+- from: /addons/api_docs/python/tfa/layers/gelu/GELU
+ to: /addons/api_docs/python/tfa/layers/GELU
+- from: /addons/api_docs/python/tfa/layers/maxout/Maxout
+ to: /addons/api_docs/python/tfa/layers/Maxout
+- from: /addons/api_docs/python/tfa/layers/normalizations/GroupNormalization
+ to: /addons/api_docs/python/tfa/layers/GroupNormalization
+- from: /addons/api_docs/python/tfa/layers/normalizations/InstanceNormalization
+ to: /addons/api_docs/python/tfa/layers/InstanceNormalization
+- from: /addons/api_docs/python/tfa/layers/optical_flow/CorrelationCost
+ to: /addons/api_docs/python/tfa/layers/CorrelationCost
+- from: /addons/api_docs/python/tfa/layers/poincare/PoincareNormalize
+ to: /addons/api_docs/python/tfa/layers/PoincareNormalize
+- from: /addons/api_docs/python/tfa/layers/sparsemax/Sparsemax
+ to: /addons/api_docs/python/tfa/layers/Sparsemax
+- from: /addons/api_docs/python/tfa/layers/sparsemax/sparsemax
+ to: /addons/api_docs/python/tfa/activations/sparsemax
+- from: /addons/api_docs/python/tfa/layers/wrappers/WeightNormalization
+ to: /addons/api_docs/python/tfa/layers/WeightNormalization
+- from: /addons/api_docs/python/tfa/losses/contrastive/ContrastiveLoss
+ to: /addons/api_docs/python/tfa/losses/ContrastiveLoss
+- from: /addons/api_docs/python/tfa/losses/contrastive/contrastive_loss
+ to: /addons/api_docs/python/tfa/losses/contrastive_loss
+- from: /addons/api_docs/python/tfa/losses/focal_loss/SigmoidFocalCrossEntropy
+ to: /addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
+- from: /addons/api_docs/python/tfa/losses/focal_loss/sigmoid_focal_crossentropy
+ to: /addons/api_docs/python/tfa/losses/sigmoid_focal_crossentropy
+- from: /addons/api_docs/python/tfa/losses/lifted/LiftedStructLoss
+ to: /addons/api_docs/python/tfa/losses/LiftedStructLoss
+- from: /addons/api_docs/python/tfa/losses/lifted/lifted_struct_loss
+ to: /addons/api_docs/python/tfa/losses/lifted_struct_loss
+- from: /addons/api_docs/python/tfa/losses/npairs/NpairsLoss
+ to: /addons/api_docs/python/tfa/losses/NpairsLoss
+- from: /addons/api_docs/python/tfa/losses/npairs/NpairsMultilabelLoss
+ to: /addons/api_docs/python/tfa/losses/NpairsMultilabelLoss
+- from: /addons/api_docs/python/tfa/losses/npairs/npairs_loss
+ to: /addons/api_docs/python/tfa/losses/npairs_loss
+- from: /addons/api_docs/python/tfa/losses/npairs/npairs_multilabel_loss
+ to: /addons/api_docs/python/tfa/losses/npairs_multilabel_loss
+- from: /addons/api_docs/python/tfa/losses/triplet/TripletSemiHardLoss
+ to: /addons/api_docs/python/tfa/losses/TripletSemiHardLoss
+- from: /addons/api_docs/python/tfa/losses/triplet/triplet_semihard_loss
+ to: /addons/api_docs/python/tfa/losses/triplet_semihard_loss
+- from: /addons/api_docs/python/tfa/metrics/cohens_kappa/CohenKappa
+ to: /addons/api_docs/python/tfa/metrics/CohenKappa
+- from: /addons/api_docs/python/tfa/metrics/f_scores/F1Score
+ to: /addons/api_docs/python/tfa/metrics/F1Score
+- from: /addons/api_docs/python/tfa/metrics/f_scores/FBetaScore
+ to: /addons/api_docs/python/tfa/metrics/FBetaScore
+- from: /addons/api_docs/python/tfa/metrics/hamming/HammingLoss
+ to: /addons/api_docs/python/tfa/metrics/HammingLoss
+- from: /addons/api_docs/python/tfa/metrics/hamming/hamming_distance
+ to: /addons/api_docs/python/tfa/metrics/hamming_distance
+- from: /addons/api_docs/python/tfa/metrics/matthews_correlation_coefficient/MatthewsCorrelationCoefficient
+ to: /addons/api_docs/python/tfa/metrics/MatthewsCorrelationCoefficient
+- from: /addons/api_docs/python/tfa/metrics/multilabel_confusion_matrix/MultiLabelConfusionMatrix
+ to: /addons/api_docs/python/tfa/metrics/MultiLabelConfusionMatrix
+- from: /addons/api_docs/python/tfa/metrics/r_square/RSquare
+ to: /addons/api_docs/python/tfa/metrics/RSquare
+- from: /addons/api_docs/python/tfa/metrics/utils/MeanMetricWrapper
+ to: /addons/api_docs/python/tfa/metrics/MeanMetricWrapper
+- from: /addons/api_docs/python/tfa/optimizers/average_wrapper/AveragedOptimizerWrapper
+ to: /addons/api_docs/python/tfa/optimizers/AveragedOptimizerWrapper
+- from: /addons/api_docs/python/tfa/optimizers/conditional_gradient/ConditionalGradient
+ to: /addons/api_docs/python/tfa/optimizers/ConditionalGradient
+- from: /addons/api_docs/python/tfa/optimizers/cyclical_learning_rate/CyclicalLearningRate
+ to: /addons/api_docs/python/tfa/optimizers/CyclicalLearningRate
+- from: /addons/api_docs/python/tfa/optimizers/cyclical_learning_rate/ExponentialCyclicalLearningRate
+ to: /addons/api_docs/python/tfa/optimizers/ExponentialCyclicalLearningRate
+- from: /addons/api_docs/python/tfa/optimizers/cyclical_learning_rate/Triangular2CyclicalLearningRate
+ to: /addons/api_docs/python/tfa/optimizers/Triangular2CyclicalLearningRate
+- from: /addons/api_docs/python/tfa/optimizers/cyclical_learning_rate/TriangularCyclicalLearningRate
+ to: /addons/api_docs/python/tfa/optimizers/TriangularCyclicalLearningRate
+- from: /addons/api_docs/python/tfa/optimizers/lamb/LAMB
+ to: /addons/api_docs/python/tfa/optimizers/LAMB
+- from: /addons/api_docs/python/tfa/optimizers/lazy_adam/LazyAdam
+ to: /addons/api_docs/python/tfa/optimizers/LazyAdam
+- from: /addons/api_docs/python/tfa/optimizers/lookahead/Lookahead
+ to: /addons/api_docs/python/tfa/optimizers/Lookahead
+- from: /addons/api_docs/python/tfa/optimizers/moving_average/MovingAverage
+ to: /addons/api_docs/python/tfa/optimizers/MovingAverage
+- from: /addons/api_docs/python/tfa/optimizers/rectified_adam/RectifiedAdam
+ to: /addons/api_docs/python/tfa/optimizers/RectifiedAdam
+- from: /addons/api_docs/python/tfa/optimizers/stochastic_weight_averaging/SWA
+ to: /addons/api_docs/python/tfa/optimizers/SWA
+- from: /addons/api_docs/python/tfa/optimizers/weight_decay_optimizers/AdamW
+ to: /addons/api_docs/python/tfa/optimizers/AdamW
+- from: /addons/api_docs/python/tfa/optimizers/weight_decay_optimizers/SGDW
+ to: /addons/api_docs/python/tfa/optimizers/SGDW
+- from: /addons/api_docs/python/tfa/optimizers/weight_decay_optimizers/extend_with_decoupled_weight_decay
+ to: /addons/api_docs/python/tfa/optimizers/extend_with_decoupled_weight_decay
+- from: /addons/api_docs/python/tfa/rnn/cell/LayerNormLSTMCell
+ to: /addons/api_docs/python/tfa/rnn/LayerNormLSTMCell
+- from: /addons/api_docs/python/tfa/rnn/cell/NASCell
+ to: /addons/api_docs/python/tfa/rnn/NASCell
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/AttentionMechanism
+ to: /addons/api_docs/python/tfa/seq2seq/AttentionMechanism
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/AttentionWrapper
+ to: /addons/api_docs/python/tfa/seq2seq/AttentionWrapper
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/AttentionWrapperState
+ to: /addons/api_docs/python/tfa/seq2seq/AttentionWrapperState
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/BahdanauAttention
+ to: /addons/api_docs/python/tfa/seq2seq/BahdanauAttention
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/BahdanauMonotonicAttention
+ to: /addons/api_docs/python/tfa/seq2seq/BahdanauMonotonicAttention
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/LuongAttention
+ to: /addons/api_docs/python/tfa/seq2seq/LuongAttention
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/LuongMonotonicAttention
+ to: /addons/api_docs/python/tfa/seq2seq/LuongMonotonicAttention
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/hardmax
+ to: /addons/api_docs/python/tfa/seq2seq/hardmax
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/monotonic_attention
+ to: /addons/api_docs/python/tfa/seq2seq/monotonic_attention
+- from: /addons/api_docs/python/tfa/seq2seq/attention_wrapper/safe_cumprod
+ to: /addons/api_docs/python/tfa/seq2seq/safe_cumprod
+- from: /addons/api_docs/python/tfa/seq2seq/basic_decoder/BasicDecoder
+ to: /addons/api_docs/python/tfa/seq2seq/BasicDecoder
+- from: /addons/api_docs/python/tfa/seq2seq/basic_decoder/BasicDecoderOutput
+ to: /addons/api_docs/python/tfa/seq2seq/BasicDecoderOutput
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoder
+ to: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoder
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderOutput
+ to: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoderOutput
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderState
+ to: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoderState
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/FinalBeamSearchDecoderOutput
+ to: /addons/api_docs/python/tfa/seq2seq/FinalBeamSearchDecoderOutput
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/gather_tree_from_array
+ to: /addons/api_docs/python/tfa/seq2seq/gather_tree_from_array
+- from: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/tile_batch
+ to: /addons/api_docs/python/tfa/seq2seq/tile_batch
+- from: /addons/api_docs/python/tfa/seq2seq/decoder/BaseDecoder
+ to: /addons/api_docs/python/tfa/seq2seq/BaseDecoder
+- from: /addons/api_docs/python/tfa/seq2seq/decoder/Decoder
+ to: /addons/api_docs/python/tfa/seq2seq/Decoder
+- from: /addons/api_docs/python/tfa/seq2seq/decoder/dynamic_decode
+ to: /addons/api_docs/python/tfa/seq2seq/dynamic_decode
+- from: /addons/api_docs/python/tfa/seq2seq/loss/SequenceLoss
+ to: /addons/api_docs/python/tfa/seq2seq/SequenceLoss
+- from: /addons/api_docs/python/tfa/seq2seq/loss/sequence_loss
+ to: /addons/api_docs/python/tfa/seq2seq/sequence_loss
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/CustomSampler
+ to: /addons/api_docs/python/tfa/seq2seq/CustomSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/GreedyEmbeddingSampler
+ to: /addons/api_docs/python/tfa/seq2seq/GreedyEmbeddingSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/InferenceSampler
+ to: /addons/api_docs/python/tfa/seq2seq/InferenceSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/SampleEmbeddingSampler
+ to: /addons/api_docs/python/tfa/seq2seq/SampleEmbeddingSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/Sampler
+ to: /addons/api_docs/python/tfa/seq2seq/Sampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/ScheduledEmbeddingTrainingSampler
+ to: /addons/api_docs/python/tfa/seq2seq/ScheduledEmbeddingTrainingSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/ScheduledOutputTrainingSampler
+ to: /addons/api_docs/python/tfa/seq2seq/ScheduledOutputTrainingSampler
+- from: /addons/api_docs/python/tfa/seq2seq/sampler/TrainingSampler
+ to: /addons/api_docs/python/tfa/seq2seq/TrainingSampler
+- from: /addons/api_docs/python/tfa/text/crf/crf_binary_score
+ to: /addons/api_docs/python/tfa/text/crf_binary_score
+- from: /addons/api_docs/python/tfa/text/crf/crf_decode
+ to: /addons/api_docs/python/tfa/text/crf_decode
+- from: /addons/api_docs/python/tfa/text/crf/crf_decode_backward
+ to: /addons/api_docs/python/tfa/text/crf_decode_backward
+- from: /addons/api_docs/python/tfa/text/crf/crf_decode_forward
+ to: /addons/api_docs/python/tfa/text/crf_decode_forward
+- from: /addons/api_docs/python/tfa/text/crf/crf_forward
+ to: /addons/api_docs/python/tfa/text/crf_forward
+- from: /addons/api_docs/python/tfa/text/crf/crf_log_likelihood
+ to: /addons/api_docs/python/tfa/text/crf_log_likelihood
+- from: /addons/api_docs/python/tfa/text/crf/crf_log_norm
+ to: /addons/api_docs/python/tfa/text/crf_log_norm
+- from: /addons/api_docs/python/tfa/text/crf/crf_multitag_sequence_score
+ to: /addons/api_docs/python/tfa/text/crf_multitag_sequence_score
+- from: /addons/api_docs/python/tfa/text/crf/crf_sequence_score
+ to: /addons/api_docs/python/tfa/text/crf_sequence_score
+- from: /addons/api_docs/python/tfa/text/crf/crf_unary_score
+ to: /addons/api_docs/python/tfa/text/crf_unary_score
+- from: /addons/api_docs/python/tfa/text/crf/viterbi_decode
+ to: /addons/api_docs/python/tfa/text/viterbi_decode
+- from: /addons/api_docs/python/tfa/text/parse_time_op/parse_time
+ to: /addons/api_docs/python/tfa/text/parse_time
+- from: /addons/api_docs/python/tfa/text/skip_gram_ops/skip_gram_sample
+ to: /addons/api_docs/python/tfa/text/skip_gram_sample
+- from: /addons/api_docs/python/tfa/text/skip_gram_ops/skip_gram_sample_with_text_vocab
+ to: /addons/api_docs/python/tfa/text/skip_gram_sample_with_text_vocab
diff --git a/docs/api_docs/python/_toc.yaml b/docs/api_docs/python/_toc.yaml
new file mode 100644
index 0000000000..04bffb8291
--- /dev/null
+++ b/docs/api_docs/python/_toc.yaml
@@ -0,0 +1,513 @@
+toc:
+- title: tfa
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa
+- title: tfa.activations
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/activations
+ - title: gelu
+ path: /addons/api_docs/python/tfa/activations/gelu
+ - title: hardshrink
+ path: /addons/api_docs/python/tfa/activations/hardshrink
+ - title: lisht
+ path: /addons/api_docs/python/tfa/activations/lisht
+ - title: mish
+ path: /addons/api_docs/python/tfa/activations/mish
+ - title: rrelu
+ path: /addons/api_docs/python/tfa/activations/rrelu
+ - title: softshrink
+ path: /addons/api_docs/python/tfa/activations/softshrink
+ - title: sparsemax
+ path: /addons/api_docs/python/tfa/activations/sparsemax
+ - title: tanhshrink
+ path: /addons/api_docs/python/tfa/activations/tanhshrink
+- title: tfa.callbacks
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/callbacks
+ - title: TimeStopping
+ path: /addons/api_docs/python/tfa/callbacks/TimeStopping
+ - title: TQDMProgressBar
+ path: /addons/api_docs/python/tfa/callbacks/TQDMProgressBar
+ - title: time_stopping
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/callbacks/time_stopping
+ - title: tqdm_progress_bar
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/callbacks/tqdm_progress_bar
+- title: tfa.image
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image
+ - title: adjust_hsv_in_yiq
+ path: /addons/api_docs/python/tfa/image/adjust_hsv_in_yiq
+ - title: connected_components
+ path: /addons/api_docs/python/tfa/image/connected_components
+ - title: dense_image_warp
+ path: /addons/api_docs/python/tfa/image/dense_image_warp
+ - title: euclidean_dist_transform
+ path: /addons/api_docs/python/tfa/image/euclidean_dist_transform
+ - title: interpolate_bilinear
+ path: /addons/api_docs/python/tfa/image/interpolate_bilinear
+ - title: interpolate_spline
+ path: /addons/api_docs/python/tfa/image/interpolate_spline
+ - title: mean_filter2d
+ path: /addons/api_docs/python/tfa/image/mean_filter2d
+ - title: median_filter2d
+ path: /addons/api_docs/python/tfa/image/median_filter2d
+ - title: random_hsv_in_yiq
+ path: /addons/api_docs/python/tfa/image/random_hsv_in_yiq
+ - title: resampler
+ path: /addons/api_docs/python/tfa/image/resampler
+ - title: rotate
+ path: /addons/api_docs/python/tfa/image/rotate
+ - title: sparse_image_warp
+ path: /addons/api_docs/python/tfa/image/sparse_image_warp
+ - title: transform
+ path: /addons/api_docs/python/tfa/image/transform
+ - title: translate
+ path: /addons/api_docs/python/tfa/image/translate
+ - title: distance_transform
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/distance_transform
+ - title: distort_image_ops
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/distort_image_ops
+ - title: filters
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/filters
+ - title: resampler_ops
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/resampler_ops
+ - title: transform_ops
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/transform_ops
+ - title: angles_to_projective_transforms
+ path: /addons/api_docs/python/tfa/image/transform_ops/angles_to_projective_transforms
+ - title: compose_transforms
+ path: /addons/api_docs/python/tfa/image/transform_ops/compose_transforms
+ - title: flat_transforms_to_matrices
+ path: /addons/api_docs/python/tfa/image/transform_ops/flat_transforms_to_matrices
+ - title: matrices_to_flat_transforms
+ path: /addons/api_docs/python/tfa/image/transform_ops/matrices_to_flat_transforms
+ - title: translate_ops
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/translate_ops
+ - title: translations_to_projective_transforms
+ path: /addons/api_docs/python/tfa/image/translate_ops/translations_to_projective_transforms
+ - title: utils
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/image/utils
+ - title: from_4D_image
+ path: /addons/api_docs/python/tfa/image/utils/from_4D_image
+ - title: get_ndims
+ path: /addons/api_docs/python/tfa/image/utils/get_ndims
+ - title: to_4D_image
+ path: /addons/api_docs/python/tfa/image/utils/to_4D_image
+- title: tfa.layers
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers
+ - title: CorrelationCost
+ path: /addons/api_docs/python/tfa/layers/CorrelationCost
+ - title: GELU
+ path: /addons/api_docs/python/tfa/layers/GELU
+ - title: GroupNormalization
+ path: /addons/api_docs/python/tfa/layers/GroupNormalization
+ - title: InstanceNormalization
+ path: /addons/api_docs/python/tfa/layers/InstanceNormalization
+ - title: Maxout
+ path: /addons/api_docs/python/tfa/layers/Maxout
+ - title: PoincareNormalize
+ path: /addons/api_docs/python/tfa/layers/PoincareNormalize
+ - title: Sparsemax
+ path: /addons/api_docs/python/tfa/layers/Sparsemax
+ - title: WeightNormalization
+ path: /addons/api_docs/python/tfa/layers/WeightNormalization
+ - title: gelu
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/gelu
+ - title: maxout
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/maxout
+ - title: normalizations
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/normalizations
+ - title: optical_flow
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/optical_flow
+ - title: poincare
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/poincare
+ - title: sparsemax
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/sparsemax
+ - title: wrappers
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/layers/wrappers
+- title: tfa.losses
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses
+ - title: ContrastiveLoss
+ path: /addons/api_docs/python/tfa/losses/ContrastiveLoss
+ - title: contrastive_loss
+ path: /addons/api_docs/python/tfa/losses/contrastive_loss
+ - title: GIoULoss
+ path: /addons/api_docs/python/tfa/losses/GIoULoss
+ - title: giou_loss
+ path: /addons/api_docs/python/tfa/losses/giou_loss
+ - title: LiftedStructLoss
+ path: /addons/api_docs/python/tfa/losses/LiftedStructLoss
+ - title: lifted_struct_loss
+ path: /addons/api_docs/python/tfa/losses/lifted_struct_loss
+ - title: NpairsLoss
+ path: /addons/api_docs/python/tfa/losses/NpairsLoss
+ - title: NpairsMultilabelLoss
+ path: /addons/api_docs/python/tfa/losses/NpairsMultilabelLoss
+ - title: npairs_loss
+ path: /addons/api_docs/python/tfa/losses/npairs_loss
+ - title: npairs_multilabel_loss
+ path: /addons/api_docs/python/tfa/losses/npairs_multilabel_loss
+ - title: SigmoidFocalCrossEntropy
+ path: /addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
+ - title: sigmoid_focal_crossentropy
+ path: /addons/api_docs/python/tfa/losses/sigmoid_focal_crossentropy
+ - title: SparsemaxLoss
+ path: /addons/api_docs/python/tfa/losses/SparsemaxLoss
+ - title: sparsemax_loss
+ path: /addons/api_docs/python/tfa/losses/sparsemax_loss
+ - title: TripletSemiHardLoss
+ path: /addons/api_docs/python/tfa/losses/TripletSemiHardLoss
+ - title: triplet_semihard_loss
+ path: /addons/api_docs/python/tfa/losses/triplet_semihard_loss
+ - title: contrastive
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/contrastive
+ - title: focal_loss
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/focal_loss
+ - title: lifted
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/lifted
+ - title: metric_learning
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/metric_learning
+ - title: pairwise_distance
+ path: /addons/api_docs/python/tfa/losses/metric_learning/pairwise_distance
+ - title: npairs
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/npairs
+ - title: triplet
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/losses/triplet
+- title: tfa.metrics
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics
+ - title: CohenKappa
+ path: /addons/api_docs/python/tfa/metrics/CohenKappa
+ - title: F1Score
+ path: /addons/api_docs/python/tfa/metrics/F1Score
+ - title: FBetaScore
+ path: /addons/api_docs/python/tfa/metrics/FBetaScore
+ - title: HammingLoss
+ path: /addons/api_docs/python/tfa/metrics/HammingLoss
+ - title: hamming_distance
+ path: /addons/api_docs/python/tfa/metrics/hamming_distance
+ - title: MatthewsCorrelationCoefficient
+ path: /addons/api_docs/python/tfa/metrics/MatthewsCorrelationCoefficient
+ - title: MeanMetricWrapper
+ path: /addons/api_docs/python/tfa/metrics/MeanMetricWrapper
+ - title: MultiLabelConfusionMatrix
+ path: /addons/api_docs/python/tfa/metrics/MultiLabelConfusionMatrix
+ - title: RSquare
+ path: /addons/api_docs/python/tfa/metrics/RSquare
+ - title: cohens_kappa
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/cohens_kappa
+ - title: f_scores
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/f_scores
+ - title: hamming
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/hamming
+ - title: hamming_loss_fn
+ path: /addons/api_docs/python/tfa/metrics/hamming/hamming_loss_fn
+ - title: matthews_correlation_coefficient
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/matthews_correlation_coefficient
+ - title: multilabel_confusion_matrix
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/multilabel_confusion_matrix
+ - title: r_square
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/r_square
+ - title: utils
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/metrics/utils
+- title: tfa.optimizers
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers
+ - title: AdamW
+ path: /addons/api_docs/python/tfa/optimizers/AdamW
+ - title: AveragedOptimizerWrapper
+ path: /addons/api_docs/python/tfa/optimizers/AveragedOptimizerWrapper
+ - title: ConditionalGradient
+ path: /addons/api_docs/python/tfa/optimizers/ConditionalGradient
+ - title: CyclicalLearningRate
+ path: /addons/api_docs/python/tfa/optimizers/CyclicalLearningRate
+ - title: ExponentialCyclicalLearningRate
+ path: /addons/api_docs/python/tfa/optimizers/ExponentialCyclicalLearningRate
+ - title: extend_with_decoupled_weight_decay
+ path: /addons/api_docs/python/tfa/optimizers/extend_with_decoupled_weight_decay
+ - title: LAMB
+ path: /addons/api_docs/python/tfa/optimizers/LAMB
+ - title: LazyAdam
+ path: /addons/api_docs/python/tfa/optimizers/LazyAdam
+ - title: Lookahead
+ path: /addons/api_docs/python/tfa/optimizers/Lookahead
+ - title: MovingAverage
+ path: /addons/api_docs/python/tfa/optimizers/MovingAverage
+ - title: RectifiedAdam
+ path: /addons/api_docs/python/tfa/optimizers/RectifiedAdam
+ - title: SGDW
+ path: /addons/api_docs/python/tfa/optimizers/SGDW
+ - title: SWA
+ path: /addons/api_docs/python/tfa/optimizers/SWA
+ - title: Triangular2CyclicalLearningRate
+ path: /addons/api_docs/python/tfa/optimizers/Triangular2CyclicalLearningRate
+ - title: TriangularCyclicalLearningRate
+ path: /addons/api_docs/python/tfa/optimizers/TriangularCyclicalLearningRate
+ - title: average_wrapper
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/average_wrapper
+ - title: conditional_gradient
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/conditional_gradient
+ - title: cyclical_learning_rate
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/cyclical_learning_rate
+ - title: lamb
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/lamb
+ - title: lazy_adam
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/lazy_adam
+ - title: lookahead
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/lookahead
+ - title: moving_average
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/moving_average
+ - title: rectified_adam
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/rectified_adam
+ - title: stochastic_weight_averaging
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/stochastic_weight_averaging
+ - title: weight_decay_optimizers
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/optimizers/weight_decay_optimizers
+ - title: DecoupledWeightDecayExtension
+ path: /addons/api_docs/python/tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension
+- title: tfa.rnn
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/rnn
+ - title: LayerNormLSTMCell
+ path: /addons/api_docs/python/tfa/rnn/LayerNormLSTMCell
+ - title: NASCell
+ path: /addons/api_docs/python/tfa/rnn/NASCell
+ - title: cell
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/rnn/cell
+- title: tfa.seq2seq
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq
+ - title: AttentionMechanism
+ path: /addons/api_docs/python/tfa/seq2seq/AttentionMechanism
+ - title: AttentionWrapper
+ path: /addons/api_docs/python/tfa/seq2seq/AttentionWrapper
+ - title: AttentionWrapperState
+ path: /addons/api_docs/python/tfa/seq2seq/AttentionWrapperState
+ - title: BahdanauAttention
+ path: /addons/api_docs/python/tfa/seq2seq/BahdanauAttention
+ - title: BahdanauMonotonicAttention
+ path: /addons/api_docs/python/tfa/seq2seq/BahdanauMonotonicAttention
+ - title: BaseDecoder
+ path: /addons/api_docs/python/tfa/seq2seq/BaseDecoder
+ - title: BasicDecoder
+ path: /addons/api_docs/python/tfa/seq2seq/BasicDecoder
+ - title: BasicDecoderOutput
+ path: /addons/api_docs/python/tfa/seq2seq/BasicDecoderOutput
+ - title: BeamSearchDecoder
+ path: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoder
+ - title: BeamSearchDecoderOutput
+ path: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoderOutput
+ - title: BeamSearchDecoderState
+ path: /addons/api_docs/python/tfa/seq2seq/BeamSearchDecoderState
+ - title: CustomSampler
+ path: /addons/api_docs/python/tfa/seq2seq/CustomSampler
+ - title: Decoder
+ path: /addons/api_docs/python/tfa/seq2seq/Decoder
+ - title: dynamic_decode
+ path: /addons/api_docs/python/tfa/seq2seq/dynamic_decode
+ - title: FinalBeamSearchDecoderOutput
+ path: /addons/api_docs/python/tfa/seq2seq/FinalBeamSearchDecoderOutput
+ - title: gather_tree_from_array
+ path: /addons/api_docs/python/tfa/seq2seq/gather_tree_from_array
+ - title: GreedyEmbeddingSampler
+ path: /addons/api_docs/python/tfa/seq2seq/GreedyEmbeddingSampler
+ - title: hardmax
+ path: /addons/api_docs/python/tfa/seq2seq/hardmax
+ - title: InferenceSampler
+ path: /addons/api_docs/python/tfa/seq2seq/InferenceSampler
+ - title: LuongAttention
+ path: /addons/api_docs/python/tfa/seq2seq/LuongAttention
+ - title: LuongMonotonicAttention
+ path: /addons/api_docs/python/tfa/seq2seq/LuongMonotonicAttention
+ - title: monotonic_attention
+ path: /addons/api_docs/python/tfa/seq2seq/monotonic_attention
+ - title: safe_cumprod
+ path: /addons/api_docs/python/tfa/seq2seq/safe_cumprod
+ - title: SampleEmbeddingSampler
+ path: /addons/api_docs/python/tfa/seq2seq/SampleEmbeddingSampler
+ - title: Sampler
+ path: /addons/api_docs/python/tfa/seq2seq/Sampler
+ - title: ScheduledEmbeddingTrainingSampler
+ path: /addons/api_docs/python/tfa/seq2seq/ScheduledEmbeddingTrainingSampler
+ - title: ScheduledOutputTrainingSampler
+ path: /addons/api_docs/python/tfa/seq2seq/ScheduledOutputTrainingSampler
+ - title: SequenceLoss
+ path: /addons/api_docs/python/tfa/seq2seq/SequenceLoss
+ - title: sequence_loss
+ path: /addons/api_docs/python/tfa/seq2seq/sequence_loss
+ - title: tile_batch
+ path: /addons/api_docs/python/tfa/seq2seq/tile_batch
+ - title: TrainingSampler
+ path: /addons/api_docs/python/tfa/seq2seq/TrainingSampler
+ - title: attention_wrapper
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/attention_wrapper
+ - title: basic_decoder
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/basic_decoder
+ - title: beam_search_decoder
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder
+ - title: attention_probs_from_attn_state
+ path: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/attention_probs_from_attn_state
+ - title: BeamSearchDecoderMixin
+ path: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin
+ - title: get_attention_probs
+ path: /addons/api_docs/python/tfa/seq2seq/beam_search_decoder/get_attention_probs
+ - title: decoder
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/decoder
+ - title: loss
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/loss
+ - title: sampler
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/seq2seq/sampler
+ - title: bernoulli_sample
+ path: /addons/api_docs/python/tfa/seq2seq/sampler/bernoulli_sample
+ - title: categorical_sample
+ path: /addons/api_docs/python/tfa/seq2seq/sampler/categorical_sample
+- title: tfa.text
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/text
+ - title: crf_binary_score
+ path: /addons/api_docs/python/tfa/text/crf_binary_score
+ - title: crf_decode
+ path: /addons/api_docs/python/tfa/text/crf_decode
+ - title: crf_decode_backward
+ path: /addons/api_docs/python/tfa/text/crf_decode_backward
+ - title: crf_decode_forward
+ path: /addons/api_docs/python/tfa/text/crf_decode_forward
+ - title: crf_forward
+ path: /addons/api_docs/python/tfa/text/crf_forward
+ - title: crf_log_likelihood
+ path: /addons/api_docs/python/tfa/text/crf_log_likelihood
+ - title: crf_log_norm
+ path: /addons/api_docs/python/tfa/text/crf_log_norm
+ - title: crf_multitag_sequence_score
+ path: /addons/api_docs/python/tfa/text/crf_multitag_sequence_score
+ - title: crf_sequence_score
+ path: /addons/api_docs/python/tfa/text/crf_sequence_score
+ - title: crf_unary_score
+ path: /addons/api_docs/python/tfa/text/crf_unary_score
+ - title: parse_time
+ path: /addons/api_docs/python/tfa/text/parse_time
+ - title: skip_gram_sample
+ path: /addons/api_docs/python/tfa/text/skip_gram_sample
+ - title: skip_gram_sample_with_text_vocab
+ path: /addons/api_docs/python/tfa/text/skip_gram_sample_with_text_vocab
+ - title: viterbi_decode
+ path: /addons/api_docs/python/tfa/text/viterbi_decode
+ - title: crf
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/text/crf
+ - title: CrfDecodeForwardRnnCell
+ path: /addons/api_docs/python/tfa/text/crf/CrfDecodeForwardRnnCell
+ - title: parse_time_op
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/text/parse_time_op
+ - title: skip_gram_ops
+ section:
+ - title: Overview
+ path: /addons/api_docs/python/tfa/text/skip_gram_ops
diff --git a/docs/api_docs/python/index.md b/docs/api_docs/python/index.md
new file mode 100644
index 0000000000..114ab5da9f
--- /dev/null
+++ b/docs/api_docs/python/index.md
@@ -0,0 +1,303 @@
+# All symbols in TensorFlow Addons
+
+## Primary symbols
+* tfa
+* tfa.activations
+* tfa.activations.gelu
+* tfa.activations.hardshrink
+* tfa.activations.lisht
+* tfa.activations.mish
+* tfa.activations.rrelu
+* tfa.activations.softshrink
+* tfa.activations.sparsemax
+* tfa.activations.tanhshrink
+* tfa.callbacks
+* tfa.callbacks.TQDMProgressBar
+* tfa.callbacks.TimeStopping
+* tfa.callbacks.time_stopping
+* tfa.callbacks.time_stopping.TimeStopping
+* tfa.callbacks.tqdm_progress_bar
+* tfa.callbacks.tqdm_progress_bar.TQDMProgressBar
+* tfa.image
+* tfa.image.adjust_hsv_in_yiq
+* tfa.image.connected_components
+* tfa.image.dense_image_warp
+* tfa.image.distance_transform
+* tfa.image.distance_transform.euclidean_dist_transform
+* tfa.image.distort_image_ops
+* tfa.image.distort_image_ops.adjust_hsv_in_yiq
+* tfa.image.distort_image_ops.random_hsv_in_yiq
+* tfa.image.euclidean_dist_transform
+* tfa.image.filters
+* tfa.image.filters.mean_filter2d
+* tfa.image.filters.median_filter2d
+* tfa.image.interpolate_bilinear
+* tfa.image.interpolate_spline
+* tfa.image.mean_filter2d
+* tfa.image.median_filter2d
+* tfa.image.random_hsv_in_yiq
+* tfa.image.resampler
+* tfa.image.resampler_ops
+* tfa.image.resampler_ops.resampler
+* tfa.image.rotate
+* tfa.image.sparse_image_warp
+* tfa.image.transform
+* tfa.image.transform_ops
+* tfa.image.transform_ops.angles_to_projective_transforms
+* tfa.image.transform_ops.compose_transforms
+* tfa.image.transform_ops.flat_transforms_to_matrices
+* tfa.image.transform_ops.matrices_to_flat_transforms
+* tfa.image.transform_ops.rotate
+* tfa.image.transform_ops.transform
+* tfa.image.translate
+* tfa.image.translate_ops
+* tfa.image.translate_ops.transform
+* tfa.image.translate_ops.translate
+* tfa.image.translate_ops.translations_to_projective_transforms
+* tfa.image.utils
+* tfa.image.utils.from_4D_image
+* tfa.image.utils.get_ndims
+* tfa.image.utils.to_4D_image
+* tfa.layers
+* tfa.layers.CorrelationCost
+* tfa.layers.GELU
+* tfa.layers.GroupNormalization
+* tfa.layers.InstanceNormalization
+* tfa.layers.Maxout
+* tfa.layers.PoincareNormalize
+* tfa.layers.Sparsemax
+* tfa.layers.WeightNormalization
+* tfa.layers.gelu
+* tfa.layers.gelu.GELU
+* tfa.layers.maxout
+* tfa.layers.maxout.Maxout
+* tfa.layers.normalizations
+* tfa.layers.normalizations.GroupNormalization
+* tfa.layers.normalizations.InstanceNormalization
+* tfa.layers.optical_flow
+* tfa.layers.optical_flow.CorrelationCost
+* tfa.layers.poincare
+* tfa.layers.poincare.PoincareNormalize
+* tfa.layers.sparsemax
+* tfa.layers.sparsemax.Sparsemax
+* tfa.layers.sparsemax.sparsemax
+* tfa.layers.wrappers
+* tfa.layers.wrappers.WeightNormalization
+* tfa.losses
+* tfa.losses.ContrastiveLoss
+* tfa.losses.GIoULoss
+* tfa.losses.LiftedStructLoss
+* tfa.losses.NpairsLoss
+* tfa.losses.NpairsMultilabelLoss
+* tfa.losses.SigmoidFocalCrossEntropy
+* tfa.losses.SparsemaxLoss
+* tfa.losses.TripletSemiHardLoss
+* tfa.losses.contrastive
+* tfa.losses.contrastive.ContrastiveLoss
+* tfa.losses.contrastive.contrastive_loss
+* tfa.losses.contrastive_loss
+* tfa.losses.focal_loss
+* tfa.losses.focal_loss.SigmoidFocalCrossEntropy
+* tfa.losses.focal_loss.sigmoid_focal_crossentropy
+* tfa.losses.giou_loss
+* tfa.losses.lifted
+* tfa.losses.lifted.LiftedStructLoss
+* tfa.losses.lifted.lifted_struct_loss
+* tfa.losses.lifted_struct_loss
+* tfa.losses.metric_learning
+* tfa.losses.metric_learning.pairwise_distance
+* tfa.losses.npairs
+* tfa.losses.npairs.NpairsLoss
+* tfa.losses.npairs.NpairsMultilabelLoss
+* tfa.losses.npairs.npairs_loss
+* tfa.losses.npairs.npairs_multilabel_loss
+* tfa.losses.npairs_loss
+* tfa.losses.npairs_multilabel_loss
+* tfa.losses.sigmoid_focal_crossentropy
+* tfa.losses.sparsemax_loss
+* tfa.losses.triplet
+* tfa.losses.triplet.TripletSemiHardLoss
+* tfa.losses.triplet.triplet_semihard_loss
+* tfa.losses.triplet_semihard_loss
+* tfa.metrics
+* tfa.metrics.CohenKappa
+* tfa.metrics.F1Score
+* tfa.metrics.FBetaScore
+* tfa.metrics.HammingLoss
+* tfa.metrics.MatthewsCorrelationCoefficient
+* tfa.metrics.MeanMetricWrapper
+* tfa.metrics.MultiLabelConfusionMatrix
+* tfa.metrics.RSquare
+* tfa.metrics.cohens_kappa
+* tfa.metrics.cohens_kappa.CohenKappa
+* tfa.metrics.f_scores
+* tfa.metrics.f_scores.F1Score
+* tfa.metrics.f_scores.FBetaScore
+* tfa.metrics.hamming
+* tfa.metrics.hamming.HammingLoss
+* tfa.metrics.hamming.hamming_distance
+* tfa.metrics.hamming.hamming_loss_fn
+* tfa.metrics.hamming_distance
+* tfa.metrics.matthews_correlation_coefficient
+* tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient
+* tfa.metrics.multilabel_confusion_matrix
+* tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix
+* tfa.metrics.r_square
+* tfa.metrics.r_square.RSquare
+* tfa.metrics.utils
+* tfa.metrics.utils.MeanMetricWrapper
+* tfa.optimizers
+* tfa.optimizers.AdamW
+* tfa.optimizers.AveragedOptimizerWrapper
+* tfa.optimizers.ConditionalGradient
+* tfa.optimizers.CyclicalLearningRate
+* tfa.optimizers.ExponentialCyclicalLearningRate
+* tfa.optimizers.LAMB
+* tfa.optimizers.LazyAdam
+* tfa.optimizers.Lookahead
+* tfa.optimizers.MovingAverage
+* tfa.optimizers.RectifiedAdam
+* tfa.optimizers.SGDW
+* tfa.optimizers.SWA
+* tfa.optimizers.Triangular2CyclicalLearningRate
+* tfa.optimizers.TriangularCyclicalLearningRate
+* tfa.optimizers.average_wrapper
+* tfa.optimizers.average_wrapper.AveragedOptimizerWrapper
+* tfa.optimizers.conditional_gradient
+* tfa.optimizers.conditional_gradient.ConditionalGradient
+* tfa.optimizers.cyclical_learning_rate
+* tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate
+* tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate
+* tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate
+* tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate
+* tfa.optimizers.extend_with_decoupled_weight_decay
+* tfa.optimizers.lamb
+* tfa.optimizers.lamb.LAMB
+* tfa.optimizers.lazy_adam
+* tfa.optimizers.lazy_adam.LazyAdam
+* tfa.optimizers.lookahead
+* tfa.optimizers.lookahead.Lookahead
+* tfa.optimizers.moving_average
+* tfa.optimizers.moving_average.MovingAverage
+* tfa.optimizers.rectified_adam
+* tfa.optimizers.rectified_adam.RectifiedAdam
+* tfa.optimizers.stochastic_weight_averaging
+* tfa.optimizers.stochastic_weight_averaging.SWA
+* tfa.optimizers.weight_decay_optimizers
+* tfa.optimizers.weight_decay_optimizers.AdamW
+* tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension
+* tfa.optimizers.weight_decay_optimizers.SGDW
+* tfa.optimizers.weight_decay_optimizers.extend_with_decoupled_weight_decay
+* tfa.rnn
+* tfa.rnn.LayerNormLSTMCell
+* tfa.rnn.NASCell
+* tfa.rnn.cell
+* tfa.rnn.cell.LayerNormLSTMCell
+* tfa.rnn.cell.NASCell
+* tfa.seq2seq
+* tfa.seq2seq.AttentionMechanism
+* tfa.seq2seq.AttentionWrapper
+* tfa.seq2seq.AttentionWrapperState
+* tfa.seq2seq.BahdanauAttention
+* tfa.seq2seq.BahdanauMonotonicAttention
+* tfa.seq2seq.BaseDecoder
+* tfa.seq2seq.BasicDecoder
+* tfa.seq2seq.BasicDecoderOutput
+* tfa.seq2seq.BeamSearchDecoder
+* tfa.seq2seq.BeamSearchDecoderOutput
+* tfa.seq2seq.BeamSearchDecoderState
+* tfa.seq2seq.CustomSampler
+* tfa.seq2seq.Decoder
+* tfa.seq2seq.FinalBeamSearchDecoderOutput
+* tfa.seq2seq.GreedyEmbeddingSampler
+* tfa.seq2seq.InferenceSampler
+* tfa.seq2seq.LuongAttention
+* tfa.seq2seq.LuongMonotonicAttention
+* tfa.seq2seq.SampleEmbeddingSampler
+* tfa.seq2seq.Sampler
+* tfa.seq2seq.ScheduledEmbeddingTrainingSampler
+* tfa.seq2seq.ScheduledOutputTrainingSampler
+* tfa.seq2seq.SequenceLoss
+* tfa.seq2seq.TrainingSampler
+* tfa.seq2seq.attention_wrapper
+* tfa.seq2seq.attention_wrapper.AttentionMechanism
+* tfa.seq2seq.attention_wrapper.AttentionWrapper
+* tfa.seq2seq.attention_wrapper.AttentionWrapperState
+* tfa.seq2seq.attention_wrapper.BahdanauAttention
+* tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention
+* tfa.seq2seq.attention_wrapper.LuongAttention
+* tfa.seq2seq.attention_wrapper.LuongMonotonicAttention
+* tfa.seq2seq.attention_wrapper.hardmax
+* tfa.seq2seq.attention_wrapper.monotonic_attention
+* tfa.seq2seq.attention_wrapper.safe_cumprod
+* tfa.seq2seq.basic_decoder
+* tfa.seq2seq.basic_decoder.BasicDecoder
+* tfa.seq2seq.basic_decoder.BasicDecoderOutput
+* tfa.seq2seq.beam_search_decoder
+* tfa.seq2seq.beam_search_decoder.BeamSearchDecoder
+* tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin
+* tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput
+* tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState
+* tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput
+* tfa.seq2seq.beam_search_decoder.attention_probs_from_attn_state
+* tfa.seq2seq.beam_search_decoder.gather_tree_from_array
+* tfa.seq2seq.beam_search_decoder.get_attention_probs
+* tfa.seq2seq.beam_search_decoder.tile_batch
+* tfa.seq2seq.decoder
+* tfa.seq2seq.decoder.BaseDecoder
+* tfa.seq2seq.decoder.Decoder
+* tfa.seq2seq.decoder.dynamic_decode
+* tfa.seq2seq.dynamic_decode
+* tfa.seq2seq.gather_tree_from_array
+* tfa.seq2seq.hardmax
+* tfa.seq2seq.loss
+* tfa.seq2seq.loss.SequenceLoss
+* tfa.seq2seq.loss.sequence_loss
+* tfa.seq2seq.monotonic_attention
+* tfa.seq2seq.safe_cumprod
+* tfa.seq2seq.sampler
+* tfa.seq2seq.sampler.CustomSampler
+* tfa.seq2seq.sampler.GreedyEmbeddingSampler
+* tfa.seq2seq.sampler.InferenceSampler
+* tfa.seq2seq.sampler.SampleEmbeddingSampler
+* tfa.seq2seq.sampler.Sampler
+* tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler
+* tfa.seq2seq.sampler.ScheduledOutputTrainingSampler
+* tfa.seq2seq.sampler.TrainingSampler
+* tfa.seq2seq.sampler.bernoulli_sample
+* tfa.seq2seq.sampler.categorical_sample
+* tfa.seq2seq.sequence_loss
+* tfa.seq2seq.tile_batch
+* tfa.text
+* tfa.text.crf
+* tfa.text.crf.CrfDecodeForwardRnnCell
+* tfa.text.crf.crf_binary_score
+* tfa.text.crf.crf_decode
+* tfa.text.crf.crf_decode_backward
+* tfa.text.crf.crf_decode_forward
+* tfa.text.crf.crf_forward
+* tfa.text.crf.crf_log_likelihood
+* tfa.text.crf.crf_log_norm
+* tfa.text.crf.crf_multitag_sequence_score
+* tfa.text.crf.crf_sequence_score
+* tfa.text.crf.crf_unary_score
+* tfa.text.crf.viterbi_decode
+* tfa.text.crf_binary_score
+* tfa.text.crf_decode
+* tfa.text.crf_decode_backward
+* tfa.text.crf_decode_forward
+* tfa.text.crf_forward
+* tfa.text.crf_log_likelihood
+* tfa.text.crf_log_norm
+* tfa.text.crf_multitag_sequence_score
+* tfa.text.crf_sequence_score
+* tfa.text.crf_unary_score
+* tfa.text.parse_time
+* tfa.text.parse_time_op
+* tfa.text.parse_time_op.parse_time
+* tfa.text.skip_gram_ops
+* tfa.text.skip_gram_ops.skip_gram_sample
+* tfa.text.skip_gram_ops.skip_gram_sample_with_text_vocab
+* tfa.text.skip_gram_sample
+* tfa.text.skip_gram_sample_with_text_vocab
+* tfa.text.viterbi_decode
\ No newline at end of file
diff --git a/docs/api_docs/python/tfa.md b/docs/api_docs/python/tfa.md
new file mode 100644
index 0000000000..291d98f736
--- /dev/null
+++ b/docs/api_docs/python/tfa.md
@@ -0,0 +1,47 @@
+
+
+
+
+
+# Module: tfa
+
+
+
+
+
+
+Useful extra functionality for TensorFlow maintained by SIG-addons.
+
+
+
+## Modules
+
+[`activations`](./tfa/activations.md) module: Additional activation functions.
+
+[`callbacks`](./tfa/callbacks.md) module: Additional callbacks that conform to Keras API.
+
+[`image`](./tfa/image.md) module: Additional image manipulation ops.
+
+[`layers`](./tfa/layers.md) module: Additional layers that conform to Keras API.
+
+[`losses`](./tfa/losses.md) module: Additional losses that conform to Keras API.
+
+[`metrics`](./tfa/metrics.md) module: Additional metrics that conform to Keras API.
+
+[`optimizers`](./tfa/optimizers.md) module: Additional optimizers that conform to Keras API.
+
+[`rnn`](./tfa/rnn.md) module: Additional RNN cells that conform to Keras API.
+
+[`seq2seq`](./tfa/seq2seq.md) module: Additional ops for building neural network sequence to sequence decoders and losses.
+
+[`text`](./tfa/text.md) module: Additional text-processing ops.
+
+
+
diff --git a/docs/api_docs/python/tfa/_api_cache.json b/docs/api_docs/python/tfa/_api_cache.json
new file mode 100644
index 0000000000..1c5168095c
--- /dev/null
+++ b/docs/api_docs/python/tfa/_api_cache.json
@@ -0,0 +1,9151 @@
+{
+ "duplicate_of": {
+ "tfa.callbacks.TimeStopping.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.callbacks.TimeStopping.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.callbacks.TimeStopping.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.callbacks.TimeStopping.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.callbacks.TimeStopping.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.callbacks.TimeStopping.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.callbacks.TimeStopping.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.callbacks.TimeStopping.on_batch_begin": "tfa.callbacks.TQDMProgressBar.on_batch_begin",
+ "tfa.callbacks.TimeStopping.on_predict_batch_begin": "tfa.callbacks.TQDMProgressBar.on_predict_batch_begin",
+ "tfa.callbacks.TimeStopping.on_predict_batch_end": "tfa.callbacks.TQDMProgressBar.on_predict_batch_end",
+ "tfa.callbacks.TimeStopping.on_predict_begin": "tfa.callbacks.TQDMProgressBar.on_predict_begin",
+ "tfa.callbacks.TimeStopping.on_predict_end": "tfa.callbacks.TQDMProgressBar.on_predict_end",
+ "tfa.callbacks.TimeStopping.on_test_batch_begin": "tfa.callbacks.TQDMProgressBar.on_test_batch_begin",
+ "tfa.callbacks.TimeStopping.on_test_batch_end": "tfa.callbacks.TQDMProgressBar.on_test_batch_end",
+ "tfa.callbacks.TimeStopping.on_test_begin": "tfa.callbacks.TQDMProgressBar.on_test_begin",
+ "tfa.callbacks.TimeStopping.on_test_end": "tfa.callbacks.TQDMProgressBar.on_test_end",
+ "tfa.callbacks.TimeStopping.on_train_batch_begin": "tfa.callbacks.TQDMProgressBar.on_train_batch_begin",
+ "tfa.callbacks.TimeStopping.on_train_batch_end": "tfa.callbacks.TQDMProgressBar.on_train_batch_end",
+ "tfa.callbacks.TimeStopping.set_model": "tfa.callbacks.TQDMProgressBar.set_model",
+ "tfa.callbacks.TimeStopping.set_params": "tfa.callbacks.TQDMProgressBar.set_params",
+ "tfa.callbacks.absolute_import": "tfa.activations.absolute_import",
+ "tfa.callbacks.division": "tfa.activations.division",
+ "tfa.callbacks.print_function": "tfa.activations.print_function",
+ "tfa.callbacks.time_stopping.TimeStopping": "tfa.callbacks.TimeStopping",
+ "tfa.callbacks.time_stopping.TimeStopping.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.callbacks.time_stopping.TimeStopping.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.callbacks.time_stopping.TimeStopping.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.callbacks.time_stopping.TimeStopping.__init__": "tfa.callbacks.TimeStopping.__init__",
+ "tfa.callbacks.time_stopping.TimeStopping.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.callbacks.time_stopping.TimeStopping.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.callbacks.time_stopping.TimeStopping.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.callbacks.time_stopping.TimeStopping.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.callbacks.time_stopping.TimeStopping.get_config": "tfa.callbacks.TimeStopping.get_config",
+ "tfa.callbacks.time_stopping.TimeStopping.on_batch_begin": "tfa.callbacks.TQDMProgressBar.on_batch_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_batch_end": "tfa.callbacks.TimeStopping.on_batch_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_epoch_begin": "tfa.callbacks.TimeStopping.on_epoch_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_epoch_end": "tfa.callbacks.TimeStopping.on_epoch_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_batch_begin": "tfa.callbacks.TQDMProgressBar.on_predict_batch_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_batch_end": "tfa.callbacks.TQDMProgressBar.on_predict_batch_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_begin": "tfa.callbacks.TQDMProgressBar.on_predict_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_end": "tfa.callbacks.TQDMProgressBar.on_predict_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_batch_begin": "tfa.callbacks.TQDMProgressBar.on_test_batch_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_batch_end": "tfa.callbacks.TQDMProgressBar.on_test_batch_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_begin": "tfa.callbacks.TQDMProgressBar.on_test_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_end": "tfa.callbacks.TQDMProgressBar.on_test_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_batch_begin": "tfa.callbacks.TQDMProgressBar.on_train_batch_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_batch_end": "tfa.callbacks.TQDMProgressBar.on_train_batch_end",
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_begin": "tfa.callbacks.TimeStopping.on_train_begin",
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_end": "tfa.callbacks.TimeStopping.on_train_end",
+ "tfa.callbacks.time_stopping.TimeStopping.set_model": "tfa.callbacks.TQDMProgressBar.set_model",
+ "tfa.callbacks.time_stopping.TimeStopping.set_params": "tfa.callbacks.TQDMProgressBar.set_params",
+ "tfa.callbacks.time_stopping.absolute_import": "tfa.activations.absolute_import",
+ "tfa.callbacks.time_stopping.division": "tfa.activations.division",
+ "tfa.callbacks.time_stopping.print_function": "tfa.activations.print_function",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar": "tfa.callbacks.TQDMProgressBar",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__init__": "tfa.callbacks.TQDMProgressBar.__init__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.format_metrics": "tfa.callbacks.TQDMProgressBar.format_metrics",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.get_config": "tfa.callbacks.TQDMProgressBar.get_config",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_batch_begin": "tfa.callbacks.TQDMProgressBar.on_batch_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_batch_end": "tfa.callbacks.TQDMProgressBar.on_batch_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_epoch_begin": "tfa.callbacks.TQDMProgressBar.on_epoch_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_epoch_end": "tfa.callbacks.TQDMProgressBar.on_epoch_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_batch_begin": "tfa.callbacks.TQDMProgressBar.on_predict_batch_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_batch_end": "tfa.callbacks.TQDMProgressBar.on_predict_batch_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_begin": "tfa.callbacks.TQDMProgressBar.on_predict_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_end": "tfa.callbacks.TQDMProgressBar.on_predict_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_batch_begin": "tfa.callbacks.TQDMProgressBar.on_test_batch_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_batch_end": "tfa.callbacks.TQDMProgressBar.on_test_batch_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_begin": "tfa.callbacks.TQDMProgressBar.on_test_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_end": "tfa.callbacks.TQDMProgressBar.on_test_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_batch_begin": "tfa.callbacks.TQDMProgressBar.on_train_batch_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_batch_end": "tfa.callbacks.TQDMProgressBar.on_train_batch_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_begin": "tfa.callbacks.TQDMProgressBar.on_train_begin",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_end": "tfa.callbacks.TQDMProgressBar.on_train_end",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.set_model": "tfa.callbacks.TQDMProgressBar.set_model",
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.set_params": "tfa.callbacks.TQDMProgressBar.set_params",
+ "tfa.callbacks.tqdm_progress_bar.absolute_import": "tfa.activations.absolute_import",
+ "tfa.callbacks.tqdm_progress_bar.division": "tfa.activations.division",
+ "tfa.callbacks.tqdm_progress_bar.print_function": "tfa.activations.print_function",
+ "tfa.image.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.distance_transform.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.distance_transform.division": "tfa.activations.division",
+ "tfa.image.distance_transform.euclidean_dist_transform": "tfa.image.euclidean_dist_transform",
+ "tfa.image.distance_transform.print_function": "tfa.activations.print_function",
+ "tfa.image.distort_image_ops.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.distort_image_ops.adjust_hsv_in_yiq": "tfa.image.adjust_hsv_in_yiq",
+ "tfa.image.distort_image_ops.division": "tfa.activations.division",
+ "tfa.image.distort_image_ops.print_function": "tfa.activations.print_function",
+ "tfa.image.distort_image_ops.random_hsv_in_yiq": "tfa.image.random_hsv_in_yiq",
+ "tfa.image.division": "tfa.activations.division",
+ "tfa.image.filters.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.filters.division": "tfa.activations.division",
+ "tfa.image.filters.mean_filter2d": "tfa.image.mean_filter2d",
+ "tfa.image.filters.median_filter2d": "tfa.image.median_filter2d",
+ "tfa.image.filters.print_function": "tfa.activations.print_function",
+ "tfa.image.print_function": "tfa.activations.print_function",
+ "tfa.image.resampler_ops.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.resampler_ops.division": "tfa.activations.division",
+ "tfa.image.resampler_ops.print_function": "tfa.activations.print_function",
+ "tfa.image.resampler_ops.resampler": "tfa.image.resampler",
+ "tfa.image.transform_ops.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.transform_ops.division": "tfa.activations.division",
+ "tfa.image.transform_ops.print_function": "tfa.activations.print_function",
+ "tfa.image.transform_ops.rotate": "tfa.image.rotate",
+ "tfa.image.transform_ops.transform": "tfa.image.transform",
+ "tfa.image.translate_ops.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.translate_ops.division": "tfa.activations.division",
+ "tfa.image.translate_ops.print_function": "tfa.activations.print_function",
+ "tfa.image.translate_ops.transform": "tfa.image.transform",
+ "tfa.image.translate_ops.translate": "tfa.image.translate",
+ "tfa.image.utils.absolute_import": "tfa.activations.absolute_import",
+ "tfa.image.utils.division": "tfa.activations.division",
+ "tfa.image.utils.print_function": "tfa.activations.print_function",
+ "tfa.layers.CorrelationCost.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.CorrelationCost.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.CorrelationCost.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.CorrelationCost.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.CorrelationCost.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.CorrelationCost.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.CorrelationCost.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.GELU.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.GELU.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.GELU.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.GELU.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.GELU.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.GELU.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.GELU.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.GELU.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.GELU.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.GELU.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.GELU.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.GELU.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.GELU.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.GELU.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.GELU.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.GELU.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.GELU.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.GELU.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.GELU.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.GELU.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.GELU.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.GELU.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.GELU.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.GELU.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.GELU.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.GELU.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.GELU.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.GELU.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.GELU.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.GELU.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.GELU.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.GELU.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.GELU.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.GELU.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.GELU.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.GELU.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.GELU.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.GELU.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.GELU.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.GELU.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.GELU.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.GELU.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.GELU.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.GELU.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.GELU.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.GELU.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.GELU.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.GELU.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.GroupNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.GroupNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.GroupNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.GroupNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.GroupNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.GroupNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.GroupNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.GroupNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.GroupNormalization.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.GroupNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.GroupNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.GroupNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.GroupNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.GroupNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.GroupNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.GroupNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.GroupNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.GroupNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.GroupNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.GroupNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.GroupNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.GroupNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.GroupNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.GroupNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.GroupNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.GroupNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.GroupNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.GroupNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.GroupNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.GroupNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.GroupNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.GroupNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.GroupNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.GroupNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.GroupNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.GroupNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.GroupNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.GroupNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.GroupNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.GroupNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.GroupNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.GroupNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.GroupNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.GroupNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.GroupNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.GroupNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.GroupNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.GroupNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.InstanceNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.InstanceNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.InstanceNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.InstanceNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.InstanceNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.InstanceNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.InstanceNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.InstanceNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.InstanceNormalization.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.InstanceNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.InstanceNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.InstanceNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.InstanceNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.InstanceNormalization.build": "tfa.layers.GroupNormalization.build",
+ "tfa.layers.InstanceNormalization.call": "tfa.layers.GroupNormalization.call",
+ "tfa.layers.InstanceNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.InstanceNormalization.compute_output_shape": "tfa.layers.GroupNormalization.compute_output_shape",
+ "tfa.layers.InstanceNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.InstanceNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.InstanceNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.InstanceNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.InstanceNormalization.get_config": "tfa.layers.GroupNormalization.get_config",
+ "tfa.layers.InstanceNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.InstanceNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.InstanceNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.InstanceNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.InstanceNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.InstanceNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.InstanceNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.InstanceNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.InstanceNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.InstanceNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.InstanceNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.InstanceNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.InstanceNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.InstanceNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.InstanceNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.InstanceNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.InstanceNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.InstanceNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.InstanceNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.InstanceNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.InstanceNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.InstanceNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.InstanceNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.InstanceNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.InstanceNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.InstanceNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.InstanceNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.InstanceNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.InstanceNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.InstanceNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.Maxout.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.Maxout.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.Maxout.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.Maxout.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.Maxout.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.Maxout.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.Maxout.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.Maxout.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.Maxout.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.Maxout.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.Maxout.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.Maxout.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.Maxout.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.Maxout.build": "tfa.layers.GELU.build",
+ "tfa.layers.Maxout.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.Maxout.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.Maxout.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.Maxout.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.Maxout.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.Maxout.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.Maxout.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.Maxout.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.Maxout.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.Maxout.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.Maxout.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.Maxout.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.Maxout.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.Maxout.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.Maxout.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.Maxout.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.Maxout.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.Maxout.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.Maxout.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.Maxout.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.Maxout.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.Maxout.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.Maxout.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.Maxout.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.Maxout.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.Maxout.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.Maxout.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.Maxout.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.Maxout.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.Maxout.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.Maxout.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.Maxout.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.Maxout.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.Maxout.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.Maxout.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.PoincareNormalize.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.PoincareNormalize.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.PoincareNormalize.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.PoincareNormalize.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.PoincareNormalize.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.PoincareNormalize.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.PoincareNormalize.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.PoincareNormalize.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.PoincareNormalize.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.PoincareNormalize.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.PoincareNormalize.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.PoincareNormalize.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.PoincareNormalize.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.PoincareNormalize.build": "tfa.layers.GELU.build",
+ "tfa.layers.PoincareNormalize.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.PoincareNormalize.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.PoincareNormalize.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.PoincareNormalize.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.PoincareNormalize.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.PoincareNormalize.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.PoincareNormalize.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.PoincareNormalize.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.PoincareNormalize.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.PoincareNormalize.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.PoincareNormalize.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.PoincareNormalize.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.PoincareNormalize.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.PoincareNormalize.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.PoincareNormalize.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.PoincareNormalize.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.PoincareNormalize.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.PoincareNormalize.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.PoincareNormalize.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.PoincareNormalize.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.PoincareNormalize.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.PoincareNormalize.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.PoincareNormalize.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.PoincareNormalize.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.PoincareNormalize.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.PoincareNormalize.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.PoincareNormalize.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.PoincareNormalize.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.PoincareNormalize.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.PoincareNormalize.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.PoincareNormalize.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.PoincareNormalize.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.PoincareNormalize.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.PoincareNormalize.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.PoincareNormalize.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.Sparsemax.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.Sparsemax.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.Sparsemax.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.Sparsemax.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.Sparsemax.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.Sparsemax.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.Sparsemax.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.Sparsemax.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.Sparsemax.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.Sparsemax.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.Sparsemax.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.Sparsemax.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.Sparsemax.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.Sparsemax.build": "tfa.layers.GELU.build",
+ "tfa.layers.Sparsemax.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.Sparsemax.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.Sparsemax.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.Sparsemax.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.Sparsemax.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.Sparsemax.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.Sparsemax.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.Sparsemax.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.Sparsemax.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.Sparsemax.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.Sparsemax.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.Sparsemax.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.Sparsemax.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.Sparsemax.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.Sparsemax.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.Sparsemax.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.Sparsemax.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.Sparsemax.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.Sparsemax.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.Sparsemax.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.Sparsemax.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.Sparsemax.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.Sparsemax.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.Sparsemax.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.Sparsemax.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.Sparsemax.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.Sparsemax.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.Sparsemax.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.Sparsemax.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.Sparsemax.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.Sparsemax.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.Sparsemax.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.Sparsemax.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.Sparsemax.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.Sparsemax.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.WeightNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.WeightNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.WeightNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.WeightNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.WeightNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.WeightNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.WeightNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.WeightNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.WeightNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.WeightNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.WeightNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.WeightNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.WeightNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.WeightNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.WeightNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.WeightNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.WeightNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.WeightNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.WeightNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.WeightNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.WeightNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.WeightNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.WeightNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.WeightNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.WeightNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.WeightNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.WeightNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.WeightNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.WeightNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.WeightNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.WeightNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.WeightNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.WeightNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.WeightNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.WeightNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.WeightNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.WeightNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.WeightNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.WeightNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.WeightNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.WeightNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.WeightNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.WeightNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.WeightNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.WeightNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.WeightNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.WeightNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.division": "tfa.activations.division",
+ "tfa.layers.gelu.GELU": "tfa.layers.GELU",
+ "tfa.layers.gelu.GELU.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.gelu.GELU.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.gelu.GELU.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.gelu.GELU.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.gelu.GELU.__init__": "tfa.layers.GELU.__init__",
+ "tfa.layers.gelu.GELU.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.gelu.GELU.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.gelu.GELU.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.gelu.GELU.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.gelu.GELU.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.gelu.GELU.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.gelu.GELU.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.gelu.GELU.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.gelu.GELU.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.gelu.GELU.build": "tfa.layers.GELU.build",
+ "tfa.layers.gelu.GELU.call": "tfa.layers.GELU.call",
+ "tfa.layers.gelu.GELU.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.gelu.GELU.compute_output_shape": "tfa.layers.GELU.compute_output_shape",
+ "tfa.layers.gelu.GELU.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.gelu.GELU.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.gelu.GELU.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.gelu.GELU.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.gelu.GELU.get_config": "tfa.layers.GELU.get_config",
+ "tfa.layers.gelu.GELU.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.gelu.GELU.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.gelu.GELU.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.gelu.GELU.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.gelu.GELU.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.gelu.GELU.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.gelu.GELU.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.gelu.GELU.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.gelu.GELU.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.gelu.GELU.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.gelu.GELU.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.gelu.GELU.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.gelu.GELU.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.gelu.GELU.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.gelu.GELU.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.gelu.GELU.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.gelu.GELU.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.gelu.GELU.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.gelu.GELU.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.gelu.GELU.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.gelu.GELU.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.gelu.GELU.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.gelu.GELU.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.gelu.GELU.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.gelu.GELU.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.gelu.GELU.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.gelu.GELU.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.gelu.GELU.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.gelu.GELU.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.gelu.GELU.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.gelu.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.gelu.division": "tfa.activations.division",
+ "tfa.layers.gelu.print_function": "tfa.activations.print_function",
+ "tfa.layers.maxout.Maxout": "tfa.layers.Maxout",
+ "tfa.layers.maxout.Maxout.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.maxout.Maxout.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.maxout.Maxout.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.maxout.Maxout.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.maxout.Maxout.__init__": "tfa.layers.Maxout.__init__",
+ "tfa.layers.maxout.Maxout.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.maxout.Maxout.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.maxout.Maxout.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.maxout.Maxout.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.maxout.Maxout.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.maxout.Maxout.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.maxout.Maxout.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.maxout.Maxout.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.maxout.Maxout.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.maxout.Maxout.build": "tfa.layers.GELU.build",
+ "tfa.layers.maxout.Maxout.call": "tfa.layers.Maxout.call",
+ "tfa.layers.maxout.Maxout.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.maxout.Maxout.compute_output_shape": "tfa.layers.Maxout.compute_output_shape",
+ "tfa.layers.maxout.Maxout.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.maxout.Maxout.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.maxout.Maxout.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.maxout.Maxout.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.maxout.Maxout.get_config": "tfa.layers.Maxout.get_config",
+ "tfa.layers.maxout.Maxout.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.maxout.Maxout.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.maxout.Maxout.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.maxout.Maxout.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.maxout.Maxout.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.maxout.Maxout.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.maxout.Maxout.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.maxout.Maxout.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.maxout.Maxout.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.maxout.Maxout.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.maxout.Maxout.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.maxout.Maxout.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.maxout.Maxout.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.maxout.Maxout.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.maxout.Maxout.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.maxout.Maxout.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.maxout.Maxout.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.maxout.Maxout.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.maxout.Maxout.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.maxout.Maxout.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.maxout.Maxout.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.maxout.Maxout.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.maxout.Maxout.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.maxout.Maxout.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.maxout.Maxout.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.maxout.Maxout.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.maxout.Maxout.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.maxout.Maxout.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.maxout.Maxout.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.maxout.Maxout.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.maxout.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.maxout.division": "tfa.activations.division",
+ "tfa.layers.maxout.print_function": "tfa.activations.print_function",
+ "tfa.layers.normalizations.GroupNormalization": "tfa.layers.GroupNormalization",
+ "tfa.layers.normalizations.GroupNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.normalizations.GroupNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.normalizations.GroupNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.normalizations.GroupNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.normalizations.GroupNormalization.__init__": "tfa.layers.GroupNormalization.__init__",
+ "tfa.layers.normalizations.GroupNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.normalizations.GroupNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.normalizations.GroupNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.normalizations.GroupNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.normalizations.GroupNormalization.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.normalizations.GroupNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.normalizations.GroupNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.normalizations.GroupNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.normalizations.GroupNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.normalizations.GroupNormalization.build": "tfa.layers.GroupNormalization.build",
+ "tfa.layers.normalizations.GroupNormalization.call": "tfa.layers.GroupNormalization.call",
+ "tfa.layers.normalizations.GroupNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.normalizations.GroupNormalization.compute_output_shape": "tfa.layers.GroupNormalization.compute_output_shape",
+ "tfa.layers.normalizations.GroupNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.normalizations.GroupNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.normalizations.GroupNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.normalizations.GroupNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.normalizations.GroupNormalization.get_config": "tfa.layers.GroupNormalization.get_config",
+ "tfa.layers.normalizations.GroupNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.normalizations.GroupNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.normalizations.GroupNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.normalizations.GroupNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.normalizations.GroupNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.normalizations.GroupNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.normalizations.GroupNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.normalizations.GroupNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.normalizations.GroupNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.normalizations.GroupNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.normalizations.GroupNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.normalizations.GroupNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.normalizations.GroupNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.normalizations.GroupNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.normalizations.GroupNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.normalizations.GroupNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.normalizations.GroupNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.normalizations.GroupNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.normalizations.GroupNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.normalizations.GroupNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.normalizations.GroupNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.normalizations.GroupNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.normalizations.GroupNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.normalizations.GroupNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.normalizations.GroupNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.normalizations.GroupNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.normalizations.GroupNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.normalizations.GroupNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.normalizations.GroupNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.normalizations.GroupNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.normalizations.InstanceNormalization": "tfa.layers.InstanceNormalization",
+ "tfa.layers.normalizations.InstanceNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.normalizations.InstanceNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.normalizations.InstanceNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.normalizations.InstanceNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.normalizations.InstanceNormalization.__init__": "tfa.layers.InstanceNormalization.__init__",
+ "tfa.layers.normalizations.InstanceNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.normalizations.InstanceNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.normalizations.InstanceNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.normalizations.InstanceNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.normalizations.InstanceNormalization.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.normalizations.InstanceNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.normalizations.InstanceNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.normalizations.InstanceNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.normalizations.InstanceNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.normalizations.InstanceNormalization.build": "tfa.layers.GroupNormalization.build",
+ "tfa.layers.normalizations.InstanceNormalization.call": "tfa.layers.GroupNormalization.call",
+ "tfa.layers.normalizations.InstanceNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.normalizations.InstanceNormalization.compute_output_shape": "tfa.layers.GroupNormalization.compute_output_shape",
+ "tfa.layers.normalizations.InstanceNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.normalizations.InstanceNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.normalizations.InstanceNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.normalizations.InstanceNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.normalizations.InstanceNormalization.get_config": "tfa.layers.GroupNormalization.get_config",
+ "tfa.layers.normalizations.InstanceNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.normalizations.InstanceNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.normalizations.InstanceNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.normalizations.InstanceNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.normalizations.InstanceNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.normalizations.InstanceNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.normalizations.InstanceNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.normalizations.InstanceNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.normalizations.InstanceNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.normalizations.InstanceNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.normalizations.InstanceNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.normalizations.InstanceNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.normalizations.InstanceNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.normalizations.InstanceNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.normalizations.InstanceNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.normalizations.InstanceNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.normalizations.InstanceNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.normalizations.InstanceNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.normalizations.InstanceNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.normalizations.InstanceNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.normalizations.InstanceNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.normalizations.InstanceNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.normalizations.InstanceNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.normalizations.InstanceNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.normalizations.InstanceNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.normalizations.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.normalizations.division": "tfa.activations.division",
+ "tfa.layers.normalizations.print_function": "tfa.activations.print_function",
+ "tfa.layers.optical_flow.CorrelationCost": "tfa.layers.CorrelationCost",
+ "tfa.layers.optical_flow.CorrelationCost.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.optical_flow.CorrelationCost.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.optical_flow.CorrelationCost.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.optical_flow.CorrelationCost.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.optical_flow.CorrelationCost.__init__": "tfa.layers.CorrelationCost.__init__",
+ "tfa.layers.optical_flow.CorrelationCost.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.optical_flow.CorrelationCost.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.optical_flow.CorrelationCost.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.optical_flow.CorrelationCost.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.optical_flow.CorrelationCost.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.optical_flow.CorrelationCost.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.optical_flow.CorrelationCost.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.optical_flow.CorrelationCost.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.optical_flow.CorrelationCost.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.optical_flow.CorrelationCost.build": "tfa.layers.CorrelationCost.build",
+ "tfa.layers.optical_flow.CorrelationCost.call": "tfa.layers.CorrelationCost.call",
+ "tfa.layers.optical_flow.CorrelationCost.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.optical_flow.CorrelationCost.compute_output_shape": "tfa.layers.CorrelationCost.compute_output_shape",
+ "tfa.layers.optical_flow.CorrelationCost.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.optical_flow.CorrelationCost.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.optical_flow.CorrelationCost.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.optical_flow.CorrelationCost.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.optical_flow.CorrelationCost.get_config": "tfa.layers.CorrelationCost.get_config",
+ "tfa.layers.optical_flow.CorrelationCost.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.optical_flow.CorrelationCost.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.optical_flow.CorrelationCost.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.optical_flow.CorrelationCost.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.optical_flow.CorrelationCost.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.optical_flow.CorrelationCost.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.optical_flow.CorrelationCost.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.optical_flow.CorrelationCost.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.optical_flow.CorrelationCost.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.optical_flow.CorrelationCost.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.optical_flow.CorrelationCost.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.optical_flow.CorrelationCost.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.optical_flow.CorrelationCost.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.optical_flow.CorrelationCost.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.optical_flow.CorrelationCost.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.optical_flow.CorrelationCost.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.optical_flow.CorrelationCost.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.optical_flow.CorrelationCost.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.optical_flow.CorrelationCost.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.optical_flow.CorrelationCost.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.optical_flow.CorrelationCost.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.optical_flow.CorrelationCost.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.optical_flow.CorrelationCost.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.optical_flow.CorrelationCost.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.optical_flow.CorrelationCost.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.optical_flow.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.optical_flow.division": "tfa.activations.division",
+ "tfa.layers.optical_flow.print_function": "tfa.activations.print_function",
+ "tfa.layers.poincare.PoincareNormalize": "tfa.layers.PoincareNormalize",
+ "tfa.layers.poincare.PoincareNormalize.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.poincare.PoincareNormalize.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.poincare.PoincareNormalize.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.poincare.PoincareNormalize.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.poincare.PoincareNormalize.__init__": "tfa.layers.PoincareNormalize.__init__",
+ "tfa.layers.poincare.PoincareNormalize.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.poincare.PoincareNormalize.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.poincare.PoincareNormalize.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.poincare.PoincareNormalize.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.poincare.PoincareNormalize.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.poincare.PoincareNormalize.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.poincare.PoincareNormalize.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.poincare.PoincareNormalize.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.poincare.PoincareNormalize.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.poincare.PoincareNormalize.build": "tfa.layers.GELU.build",
+ "tfa.layers.poincare.PoincareNormalize.call": "tfa.layers.PoincareNormalize.call",
+ "tfa.layers.poincare.PoincareNormalize.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.poincare.PoincareNormalize.compute_output_shape": "tfa.layers.PoincareNormalize.compute_output_shape",
+ "tfa.layers.poincare.PoincareNormalize.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.poincare.PoincareNormalize.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.poincare.PoincareNormalize.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.poincare.PoincareNormalize.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.poincare.PoincareNormalize.get_config": "tfa.layers.PoincareNormalize.get_config",
+ "tfa.layers.poincare.PoincareNormalize.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.poincare.PoincareNormalize.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.poincare.PoincareNormalize.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.poincare.PoincareNormalize.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.poincare.PoincareNormalize.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.poincare.PoincareNormalize.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.poincare.PoincareNormalize.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.poincare.PoincareNormalize.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.poincare.PoincareNormalize.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.poincare.PoincareNormalize.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.poincare.PoincareNormalize.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.poincare.PoincareNormalize.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.poincare.PoincareNormalize.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.poincare.PoincareNormalize.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.poincare.PoincareNormalize.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.poincare.PoincareNormalize.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.poincare.PoincareNormalize.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.poincare.PoincareNormalize.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.poincare.PoincareNormalize.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.poincare.PoincareNormalize.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.poincare.PoincareNormalize.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.poincare.PoincareNormalize.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.poincare.PoincareNormalize.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.poincare.PoincareNormalize.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.poincare.PoincareNormalize.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.poincare.PoincareNormalize.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.poincare.PoincareNormalize.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.poincare.PoincareNormalize.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.poincare.PoincareNormalize.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.poincare.PoincareNormalize.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.poincare.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.poincare.division": "tfa.activations.division",
+ "tfa.layers.poincare.print_function": "tfa.activations.print_function",
+ "tfa.layers.print_function": "tfa.activations.print_function",
+ "tfa.layers.sparsemax.Sparsemax": "tfa.layers.Sparsemax",
+ "tfa.layers.sparsemax.Sparsemax.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.sparsemax.Sparsemax.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.sparsemax.Sparsemax.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.sparsemax.Sparsemax.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.sparsemax.Sparsemax.__init__": "tfa.layers.Sparsemax.__init__",
+ "tfa.layers.sparsemax.Sparsemax.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.sparsemax.Sparsemax.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.sparsemax.Sparsemax.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.sparsemax.Sparsemax.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.sparsemax.Sparsemax.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.layers.sparsemax.Sparsemax.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.sparsemax.Sparsemax.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.sparsemax.Sparsemax.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.sparsemax.Sparsemax.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.sparsemax.Sparsemax.build": "tfa.layers.GELU.build",
+ "tfa.layers.sparsemax.Sparsemax.call": "tfa.layers.Sparsemax.call",
+ "tfa.layers.sparsemax.Sparsemax.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.sparsemax.Sparsemax.compute_output_shape": "tfa.layers.Sparsemax.compute_output_shape",
+ "tfa.layers.sparsemax.Sparsemax.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.sparsemax.Sparsemax.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.sparsemax.Sparsemax.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.sparsemax.Sparsemax.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.sparsemax.Sparsemax.get_config": "tfa.layers.Sparsemax.get_config",
+ "tfa.layers.sparsemax.Sparsemax.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.sparsemax.Sparsemax.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.sparsemax.Sparsemax.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.sparsemax.Sparsemax.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.sparsemax.Sparsemax.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.sparsemax.Sparsemax.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.sparsemax.Sparsemax.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.sparsemax.Sparsemax.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.sparsemax.Sparsemax.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.sparsemax.Sparsemax.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.sparsemax.Sparsemax.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.sparsemax.Sparsemax.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.sparsemax.Sparsemax.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.sparsemax.Sparsemax.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.sparsemax.Sparsemax.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.sparsemax.Sparsemax.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.sparsemax.Sparsemax.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.sparsemax.Sparsemax.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.sparsemax.Sparsemax.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.sparsemax.Sparsemax.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.sparsemax.Sparsemax.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.sparsemax.Sparsemax.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.sparsemax.Sparsemax.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.sparsemax.Sparsemax.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.sparsemax.Sparsemax.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.sparsemax.Sparsemax.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.sparsemax.Sparsemax.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.sparsemax.Sparsemax.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.sparsemax.Sparsemax.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.sparsemax.Sparsemax.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.sparsemax.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.sparsemax.division": "tfa.activations.division",
+ "tfa.layers.sparsemax.print_function": "tfa.activations.print_function",
+ "tfa.layers.sparsemax.sparsemax": "tfa.activations.sparsemax",
+ "tfa.layers.wrappers.WeightNormalization": "tfa.layers.WeightNormalization",
+ "tfa.layers.wrappers.WeightNormalization.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.layers.wrappers.WeightNormalization.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.layers.wrappers.WeightNormalization.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.layers.wrappers.WeightNormalization.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.layers.wrappers.WeightNormalization.__init__": "tfa.layers.WeightNormalization.__init__",
+ "tfa.layers.wrappers.WeightNormalization.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.layers.wrappers.WeightNormalization.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.layers.wrappers.WeightNormalization.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.layers.wrappers.WeightNormalization.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.layers.wrappers.WeightNormalization.activity_regularizer": "tfa.layers.WeightNormalization.activity_regularizer",
+ "tfa.layers.wrappers.WeightNormalization.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.layers.wrappers.WeightNormalization.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.layers.wrappers.WeightNormalization.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.layers.wrappers.WeightNormalization.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.layers.wrappers.WeightNormalization.build": "tfa.layers.WeightNormalization.build",
+ "tfa.layers.wrappers.WeightNormalization.call": "tfa.layers.WeightNormalization.call",
+ "tfa.layers.wrappers.WeightNormalization.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.layers.wrappers.WeightNormalization.compute_output_shape": "tfa.layers.WeightNormalization.compute_output_shape",
+ "tfa.layers.wrappers.WeightNormalization.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.layers.wrappers.WeightNormalization.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.layers.wrappers.WeightNormalization.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.layers.wrappers.WeightNormalization.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.layers.wrappers.WeightNormalization.get_config": "tfa.layers.WeightNormalization.get_config",
+ "tfa.layers.wrappers.WeightNormalization.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.layers.wrappers.WeightNormalization.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.layers.wrappers.WeightNormalization.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.layers.wrappers.WeightNormalization.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.layers.wrappers.WeightNormalization.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.layers.wrappers.WeightNormalization.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.layers.wrappers.WeightNormalization.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.layers.wrappers.WeightNormalization.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.layers.wrappers.WeightNormalization.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.layers.wrappers.WeightNormalization.input": "tfa.layers.CorrelationCost.input",
+ "tfa.layers.wrappers.WeightNormalization.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.layers.wrappers.WeightNormalization.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.layers.wrappers.WeightNormalization.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.layers.wrappers.WeightNormalization.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.layers.wrappers.WeightNormalization.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.layers.wrappers.WeightNormalization.name": "tfa.layers.CorrelationCost.name",
+ "tfa.layers.wrappers.WeightNormalization.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.layers.wrappers.WeightNormalization.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.layers.wrappers.WeightNormalization.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.layers.wrappers.WeightNormalization.output": "tfa.layers.CorrelationCost.output",
+ "tfa.layers.wrappers.WeightNormalization.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.layers.wrappers.WeightNormalization.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.layers.wrappers.WeightNormalization.remove": "tfa.layers.WeightNormalization.remove",
+ "tfa.layers.wrappers.WeightNormalization.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.layers.wrappers.WeightNormalization.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.layers.wrappers.WeightNormalization.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.layers.wrappers.WeightNormalization.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.layers.wrappers.WeightNormalization.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.layers.wrappers.WeightNormalization.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.layers.wrappers.WeightNormalization.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.layers.wrappers.WeightNormalization.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.layers.wrappers.absolute_import": "tfa.activations.absolute_import",
+ "tfa.layers.wrappers.division": "tfa.activations.division",
+ "tfa.layers.wrappers.print_function": "tfa.activations.print_function",
+ "tfa.losses.ContrastiveLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.ContrastiveLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.ContrastiveLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.ContrastiveLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.ContrastiveLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.ContrastiveLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.ContrastiveLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.GIoULoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.GIoULoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.GIoULoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.GIoULoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.GIoULoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.GIoULoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.GIoULoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.GIoULoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.LiftedStructLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.LiftedStructLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.LiftedStructLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.LiftedStructLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.LiftedStructLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.LiftedStructLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.LiftedStructLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.LiftedStructLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.NpairsLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.NpairsLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.NpairsLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.NpairsLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.NpairsLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.NpairsLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.NpairsLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.NpairsLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.NpairsMultilabelLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.NpairsMultilabelLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.NpairsMultilabelLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.NpairsMultilabelLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.NpairsMultilabelLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.NpairsMultilabelLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.NpairsMultilabelLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.NpairsMultilabelLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.NpairsMultilabelLoss.get_config": "tfa.losses.NpairsLoss.get_config",
+ "tfa.losses.SigmoidFocalCrossEntropy.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.SigmoidFocalCrossEntropy.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.SparsemaxLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.SparsemaxLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.SparsemaxLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.SparsemaxLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.SparsemaxLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.SparsemaxLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.SparsemaxLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.SparsemaxLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.TripletSemiHardLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.TripletSemiHardLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.TripletSemiHardLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.TripletSemiHardLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.TripletSemiHardLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.TripletSemiHardLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.TripletSemiHardLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.TripletSemiHardLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.contrastive.ContrastiveLoss": "tfa.losses.ContrastiveLoss",
+ "tfa.losses.contrastive.ContrastiveLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.contrastive.ContrastiveLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.contrastive.ContrastiveLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.contrastive.ContrastiveLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.contrastive.ContrastiveLoss.__init__": "tfa.losses.ContrastiveLoss.__init__",
+ "tfa.losses.contrastive.ContrastiveLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.contrastive.ContrastiveLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.contrastive.ContrastiveLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.contrastive.ContrastiveLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.contrastive.ContrastiveLoss.call": "tfa.losses.ContrastiveLoss.call",
+ "tfa.losses.contrastive.ContrastiveLoss.get_config": "tfa.losses.ContrastiveLoss.get_config",
+ "tfa.losses.contrastive.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.contrastive.contrastive_loss": "tfa.losses.contrastive_loss",
+ "tfa.losses.contrastive.division": "tfa.activations.division",
+ "tfa.losses.contrastive.print_function": "tfa.activations.print_function",
+ "tfa.losses.division": "tfa.activations.division",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy": "tfa.losses.SigmoidFocalCrossEntropy",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__init__": "tfa.losses.SigmoidFocalCrossEntropy.__init__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.call": "tfa.losses.SigmoidFocalCrossEntropy.call",
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.get_config": "tfa.losses.SigmoidFocalCrossEntropy.get_config",
+ "tfa.losses.focal_loss.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.focal_loss.division": "tfa.activations.division",
+ "tfa.losses.focal_loss.print_function": "tfa.activations.print_function",
+ "tfa.losses.focal_loss.sigmoid_focal_crossentropy": "tfa.losses.sigmoid_focal_crossentropy",
+ "tfa.losses.lifted.LiftedStructLoss": "tfa.losses.LiftedStructLoss",
+ "tfa.losses.lifted.LiftedStructLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.lifted.LiftedStructLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.lifted.LiftedStructLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.lifted.LiftedStructLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.lifted.LiftedStructLoss.__init__": "tfa.losses.LiftedStructLoss.__init__",
+ "tfa.losses.lifted.LiftedStructLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.lifted.LiftedStructLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.lifted.LiftedStructLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.lifted.LiftedStructLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.lifted.LiftedStructLoss.call": "tfa.losses.LiftedStructLoss.call",
+ "tfa.losses.lifted.LiftedStructLoss.get_config": "tfa.losses.LiftedStructLoss.get_config",
+ "tfa.losses.lifted.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.lifted.division": "tfa.activations.division",
+ "tfa.losses.lifted.lifted_struct_loss": "tfa.losses.lifted_struct_loss",
+ "tfa.losses.lifted.print_function": "tfa.activations.print_function",
+ "tfa.losses.metric_learning.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.metric_learning.division": "tfa.activations.division",
+ "tfa.losses.metric_learning.print_function": "tfa.activations.print_function",
+ "tfa.losses.npairs.NpairsLoss": "tfa.losses.NpairsLoss",
+ "tfa.losses.npairs.NpairsLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.npairs.NpairsLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.npairs.NpairsLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.npairs.NpairsLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.npairs.NpairsLoss.__init__": "tfa.losses.NpairsLoss.__init__",
+ "tfa.losses.npairs.NpairsLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.npairs.NpairsLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.npairs.NpairsLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.npairs.NpairsLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.npairs.NpairsLoss.call": "tfa.losses.NpairsLoss.call",
+ "tfa.losses.npairs.NpairsLoss.get_config": "tfa.losses.NpairsLoss.get_config",
+ "tfa.losses.npairs.NpairsMultilabelLoss": "tfa.losses.NpairsMultilabelLoss",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__init__": "tfa.losses.NpairsMultilabelLoss.__init__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.npairs.NpairsMultilabelLoss.call": "tfa.losses.NpairsMultilabelLoss.call",
+ "tfa.losses.npairs.NpairsMultilabelLoss.get_config": "tfa.losses.NpairsLoss.get_config",
+ "tfa.losses.npairs.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.npairs.division": "tfa.activations.division",
+ "tfa.losses.npairs.npairs_loss": "tfa.losses.npairs_loss",
+ "tfa.losses.npairs.npairs_multilabel_loss": "tfa.losses.npairs_multilabel_loss",
+ "tfa.losses.npairs.print_function": "tfa.activations.print_function",
+ "tfa.losses.print_function": "tfa.activations.print_function",
+ "tfa.losses.triplet.TripletSemiHardLoss": "tfa.losses.TripletSemiHardLoss",
+ "tfa.losses.triplet.TripletSemiHardLoss.__call__": "tfa.losses.ContrastiveLoss.__call__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__init__": "tfa.losses.TripletSemiHardLoss.__init__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.losses.triplet.TripletSemiHardLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.losses.triplet.TripletSemiHardLoss.call": "tfa.losses.TripletSemiHardLoss.call",
+ "tfa.losses.triplet.TripletSemiHardLoss.get_config": "tfa.losses.TripletSemiHardLoss.get_config",
+ "tfa.losses.triplet.absolute_import": "tfa.activations.absolute_import",
+ "tfa.losses.triplet.division": "tfa.activations.division",
+ "tfa.losses.triplet.print_function": "tfa.activations.print_function",
+ "tfa.losses.triplet.triplet_semihard_loss": "tfa.losses.triplet_semihard_loss",
+ "tfa.metrics.CohenKappa.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.CohenKappa.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.CohenKappa.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.CohenKappa.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.CohenKappa.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.CohenKappa.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.CohenKappa.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.CohenKappa.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.CohenKappa.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.CohenKappa.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.CohenKappa.build": "tfa.layers.GELU.build",
+ "tfa.metrics.CohenKappa.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.CohenKappa.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.CohenKappa.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.CohenKappa.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.CohenKappa.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.CohenKappa.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.CohenKappa.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.CohenKappa.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.CohenKappa.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.CohenKappa.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.CohenKappa.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.CohenKappa.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.CohenKappa.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.CohenKappa.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.CohenKappa.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.CohenKappa.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.CohenKappa.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.CohenKappa.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.CohenKappa.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.CohenKappa.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.CohenKappa.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.CohenKappa.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.CohenKappa.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.CohenKappa.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.CohenKappa.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.CohenKappa.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.CohenKappa.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.CohenKappa.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.CohenKappa.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.CohenKappa.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.CohenKappa.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.CohenKappa.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.CohenKappa.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.CohenKappa.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.F1Score.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.F1Score.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.F1Score.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.F1Score.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.F1Score.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.F1Score.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.F1Score.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.F1Score.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.F1Score.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.F1Score.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.F1Score.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.F1Score.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.F1Score.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.F1Score.build": "tfa.layers.GELU.build",
+ "tfa.metrics.F1Score.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.F1Score.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.F1Score.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.F1Score.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.F1Score.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.F1Score.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.F1Score.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.F1Score.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.F1Score.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.F1Score.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.F1Score.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.F1Score.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.F1Score.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.F1Score.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.F1Score.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.F1Score.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.F1Score.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.F1Score.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.F1Score.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.F1Score.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.F1Score.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.F1Score.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.F1Score.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.F1Score.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.F1Score.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.F1Score.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.F1Score.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.F1Score.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.F1Score.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.F1Score.reset_states": "tfa.metrics.FBetaScore.reset_states",
+ "tfa.metrics.F1Score.result": "tfa.metrics.FBetaScore.result",
+ "tfa.metrics.F1Score.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.F1Score.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.F1Score.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.F1Score.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.F1Score.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.F1Score.update_state": "tfa.metrics.FBetaScore.update_state",
+ "tfa.metrics.F1Score.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.F1Score.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.F1Score.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.FBetaScore.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.FBetaScore.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.FBetaScore.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.FBetaScore.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.FBetaScore.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.FBetaScore.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.FBetaScore.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.FBetaScore.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.FBetaScore.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.FBetaScore.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.FBetaScore.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.FBetaScore.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.FBetaScore.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.FBetaScore.build": "tfa.layers.GELU.build",
+ "tfa.metrics.FBetaScore.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.FBetaScore.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.FBetaScore.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.FBetaScore.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.FBetaScore.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.FBetaScore.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.FBetaScore.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.FBetaScore.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.FBetaScore.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.FBetaScore.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.FBetaScore.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.FBetaScore.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.FBetaScore.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.FBetaScore.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.FBetaScore.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.FBetaScore.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.FBetaScore.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.FBetaScore.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.FBetaScore.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.FBetaScore.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.FBetaScore.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.FBetaScore.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.FBetaScore.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.FBetaScore.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.FBetaScore.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.FBetaScore.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.FBetaScore.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.FBetaScore.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.FBetaScore.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.FBetaScore.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.FBetaScore.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.FBetaScore.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.FBetaScore.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.FBetaScore.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.FBetaScore.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.FBetaScore.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.FBetaScore.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.HammingLoss.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.HammingLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.HammingLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.HammingLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.HammingLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.HammingLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.HammingLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.HammingLoss.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.HammingLoss.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.HammingLoss.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.HammingLoss.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.HammingLoss.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.HammingLoss.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.HammingLoss.build": "tfa.layers.GELU.build",
+ "tfa.metrics.HammingLoss.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.HammingLoss.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.HammingLoss.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.HammingLoss.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.HammingLoss.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.HammingLoss.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.HammingLoss.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.HammingLoss.get_config": "tfa.metrics.MeanMetricWrapper.get_config",
+ "tfa.metrics.HammingLoss.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.HammingLoss.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.HammingLoss.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.HammingLoss.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.HammingLoss.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.HammingLoss.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.HammingLoss.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.HammingLoss.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.HammingLoss.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.HammingLoss.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.HammingLoss.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.HammingLoss.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.HammingLoss.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.HammingLoss.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.HammingLoss.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.HammingLoss.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.HammingLoss.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.HammingLoss.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.HammingLoss.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.HammingLoss.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.HammingLoss.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.HammingLoss.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.HammingLoss.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.HammingLoss.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.HammingLoss.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.HammingLoss.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.HammingLoss.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.HammingLoss.update_state": "tfa.metrics.MeanMetricWrapper.update_state",
+ "tfa.metrics.HammingLoss.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.HammingLoss.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.HammingLoss.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.MatthewsCorrelationCoefficient.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.MatthewsCorrelationCoefficient.build": "tfa.layers.GELU.build",
+ "tfa.metrics.MatthewsCorrelationCoefficient.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.MatthewsCorrelationCoefficient.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.MatthewsCorrelationCoefficient.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.MatthewsCorrelationCoefficient.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.MatthewsCorrelationCoefficient.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.MatthewsCorrelationCoefficient.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.MatthewsCorrelationCoefficient.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.MatthewsCorrelationCoefficient.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.MatthewsCorrelationCoefficient.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.MatthewsCorrelationCoefficient.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.MatthewsCorrelationCoefficient.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.MatthewsCorrelationCoefficient.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.MatthewsCorrelationCoefficient.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.MatthewsCorrelationCoefficient.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.MatthewsCorrelationCoefficient.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.MatthewsCorrelationCoefficient.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.MatthewsCorrelationCoefficient.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.MatthewsCorrelationCoefficient.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.MatthewsCorrelationCoefficient.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.MeanMetricWrapper.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.MeanMetricWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.MeanMetricWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.MeanMetricWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.MeanMetricWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.MeanMetricWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.MeanMetricWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.MeanMetricWrapper.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.MeanMetricWrapper.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.MeanMetricWrapper.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.MeanMetricWrapper.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.MeanMetricWrapper.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.MeanMetricWrapper.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.MeanMetricWrapper.build": "tfa.layers.GELU.build",
+ "tfa.metrics.MeanMetricWrapper.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.MeanMetricWrapper.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.MeanMetricWrapper.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.MeanMetricWrapper.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.MeanMetricWrapper.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.MeanMetricWrapper.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.MeanMetricWrapper.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.MeanMetricWrapper.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.MeanMetricWrapper.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.MeanMetricWrapper.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.MeanMetricWrapper.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.MeanMetricWrapper.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.MeanMetricWrapper.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.MeanMetricWrapper.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.MeanMetricWrapper.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.MeanMetricWrapper.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.MeanMetricWrapper.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.MeanMetricWrapper.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.MeanMetricWrapper.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.MeanMetricWrapper.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.MeanMetricWrapper.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.MeanMetricWrapper.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.MeanMetricWrapper.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.MeanMetricWrapper.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.MeanMetricWrapper.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.MeanMetricWrapper.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.MeanMetricWrapper.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.MeanMetricWrapper.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.MeanMetricWrapper.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.MeanMetricWrapper.reset_states": "tfa.metrics.HammingLoss.reset_states",
+ "tfa.metrics.MeanMetricWrapper.result": "tfa.metrics.HammingLoss.result",
+ "tfa.metrics.MeanMetricWrapper.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.MeanMetricWrapper.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.MeanMetricWrapper.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.MeanMetricWrapper.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.MeanMetricWrapper.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.MeanMetricWrapper.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.MeanMetricWrapper.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.MeanMetricWrapper.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.MultiLabelConfusionMatrix.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.MultiLabelConfusionMatrix.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.MultiLabelConfusionMatrix.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.MultiLabelConfusionMatrix.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.MultiLabelConfusionMatrix.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.MultiLabelConfusionMatrix.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.MultiLabelConfusionMatrix.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.MultiLabelConfusionMatrix.build": "tfa.layers.GELU.build",
+ "tfa.metrics.MultiLabelConfusionMatrix.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.MultiLabelConfusionMatrix.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.MultiLabelConfusionMatrix.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.MultiLabelConfusionMatrix.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.MultiLabelConfusionMatrix.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.MultiLabelConfusionMatrix.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.MultiLabelConfusionMatrix.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.MultiLabelConfusionMatrix.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.MultiLabelConfusionMatrix.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.MultiLabelConfusionMatrix.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.MultiLabelConfusionMatrix.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.MultiLabelConfusionMatrix.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.MultiLabelConfusionMatrix.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.MultiLabelConfusionMatrix.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.MultiLabelConfusionMatrix.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.MultiLabelConfusionMatrix.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.MultiLabelConfusionMatrix.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.MultiLabelConfusionMatrix.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.MultiLabelConfusionMatrix.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.MultiLabelConfusionMatrix.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.MultiLabelConfusionMatrix.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.MultiLabelConfusionMatrix.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.MultiLabelConfusionMatrix.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.RSquare.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.RSquare.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.RSquare.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.RSquare.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.RSquare.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.RSquare.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.RSquare.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.RSquare.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.RSquare.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.RSquare.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.RSquare.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.RSquare.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.RSquare.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.RSquare.build": "tfa.layers.GELU.build",
+ "tfa.metrics.RSquare.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.RSquare.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.RSquare.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.RSquare.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.RSquare.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.RSquare.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.RSquare.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.RSquare.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.RSquare.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.RSquare.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.RSquare.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.RSquare.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.RSquare.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.RSquare.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.RSquare.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.RSquare.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.RSquare.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.RSquare.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.RSquare.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.RSquare.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.RSquare.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.RSquare.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.RSquare.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.RSquare.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.RSquare.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.RSquare.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.RSquare.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.RSquare.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.RSquare.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.RSquare.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.RSquare.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.RSquare.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.RSquare.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.RSquare.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.RSquare.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.RSquare.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.RSquare.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.cohens_kappa.CohenKappa": "tfa.metrics.CohenKappa",
+ "tfa.metrics.cohens_kappa.CohenKappa.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__init__": "tfa.metrics.CohenKappa.__init__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.cohens_kappa.CohenKappa.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.cohens_kappa.CohenKappa.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.cohens_kappa.CohenKappa.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.cohens_kappa.CohenKappa.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.cohens_kappa.CohenKappa.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.cohens_kappa.CohenKappa.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.cohens_kappa.CohenKappa.build": "tfa.layers.GELU.build",
+ "tfa.metrics.cohens_kappa.CohenKappa.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.cohens_kappa.CohenKappa.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.cohens_kappa.CohenKappa.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.cohens_kappa.CohenKappa.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_config": "tfa.metrics.CohenKappa.get_config",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.cohens_kappa.CohenKappa.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.cohens_kappa.CohenKappa.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.cohens_kappa.CohenKappa.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.cohens_kappa.CohenKappa.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.cohens_kappa.CohenKappa.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.cohens_kappa.CohenKappa.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.cohens_kappa.CohenKappa.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.cohens_kappa.CohenKappa.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.cohens_kappa.CohenKappa.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.cohens_kappa.CohenKappa.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.cohens_kappa.CohenKappa.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.cohens_kappa.CohenKappa.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.cohens_kappa.CohenKappa.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.cohens_kappa.CohenKappa.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.cohens_kappa.CohenKappa.reset_states": "tfa.metrics.CohenKappa.reset_states",
+ "tfa.metrics.cohens_kappa.CohenKappa.result": "tfa.metrics.CohenKappa.result",
+ "tfa.metrics.cohens_kappa.CohenKappa.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.cohens_kappa.CohenKappa.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.cohens_kappa.CohenKappa.update_state": "tfa.metrics.CohenKappa.update_state",
+ "tfa.metrics.cohens_kappa.CohenKappa.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.cohens_kappa.CohenKappa.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.cohens_kappa.CohenKappa.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.cohens_kappa.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.cohens_kappa.division": "tfa.activations.division",
+ "tfa.metrics.cohens_kappa.print_function": "tfa.activations.print_function",
+ "tfa.metrics.division": "tfa.activations.division",
+ "tfa.metrics.f_scores.F1Score": "tfa.metrics.F1Score",
+ "tfa.metrics.f_scores.F1Score.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.f_scores.F1Score.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.f_scores.F1Score.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.f_scores.F1Score.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.f_scores.F1Score.__init__": "tfa.metrics.F1Score.__init__",
+ "tfa.metrics.f_scores.F1Score.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.f_scores.F1Score.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.f_scores.F1Score.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.f_scores.F1Score.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.f_scores.F1Score.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.f_scores.F1Score.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.f_scores.F1Score.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.f_scores.F1Score.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.f_scores.F1Score.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.f_scores.F1Score.build": "tfa.layers.GELU.build",
+ "tfa.metrics.f_scores.F1Score.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.f_scores.F1Score.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.f_scores.F1Score.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.f_scores.F1Score.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.f_scores.F1Score.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.f_scores.F1Score.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.f_scores.F1Score.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.f_scores.F1Score.get_config": "tfa.metrics.F1Score.get_config",
+ "tfa.metrics.f_scores.F1Score.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.f_scores.F1Score.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.f_scores.F1Score.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.f_scores.F1Score.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.f_scores.F1Score.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.f_scores.F1Score.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.f_scores.F1Score.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.f_scores.F1Score.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.f_scores.F1Score.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.f_scores.F1Score.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.f_scores.F1Score.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.f_scores.F1Score.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.f_scores.F1Score.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.f_scores.F1Score.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.f_scores.F1Score.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.f_scores.F1Score.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.f_scores.F1Score.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.f_scores.F1Score.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.f_scores.F1Score.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.f_scores.F1Score.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.f_scores.F1Score.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.f_scores.F1Score.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.f_scores.F1Score.reset_states": "tfa.metrics.FBetaScore.reset_states",
+ "tfa.metrics.f_scores.F1Score.result": "tfa.metrics.FBetaScore.result",
+ "tfa.metrics.f_scores.F1Score.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.f_scores.F1Score.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.f_scores.F1Score.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.f_scores.F1Score.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.f_scores.F1Score.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.f_scores.F1Score.update_state": "tfa.metrics.FBetaScore.update_state",
+ "tfa.metrics.f_scores.F1Score.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.f_scores.F1Score.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.f_scores.F1Score.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.f_scores.FBetaScore": "tfa.metrics.FBetaScore",
+ "tfa.metrics.f_scores.FBetaScore.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.f_scores.FBetaScore.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.f_scores.FBetaScore.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.f_scores.FBetaScore.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.f_scores.FBetaScore.__init__": "tfa.metrics.FBetaScore.__init__",
+ "tfa.metrics.f_scores.FBetaScore.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.f_scores.FBetaScore.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.f_scores.FBetaScore.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.f_scores.FBetaScore.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.f_scores.FBetaScore.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.f_scores.FBetaScore.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.f_scores.FBetaScore.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.f_scores.FBetaScore.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.f_scores.FBetaScore.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.f_scores.FBetaScore.build": "tfa.layers.GELU.build",
+ "tfa.metrics.f_scores.FBetaScore.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.f_scores.FBetaScore.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.f_scores.FBetaScore.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.f_scores.FBetaScore.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.f_scores.FBetaScore.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.f_scores.FBetaScore.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.f_scores.FBetaScore.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.f_scores.FBetaScore.get_config": "tfa.metrics.FBetaScore.get_config",
+ "tfa.metrics.f_scores.FBetaScore.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.f_scores.FBetaScore.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.f_scores.FBetaScore.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.f_scores.FBetaScore.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.f_scores.FBetaScore.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.f_scores.FBetaScore.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.f_scores.FBetaScore.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.f_scores.FBetaScore.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.f_scores.FBetaScore.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.f_scores.FBetaScore.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.f_scores.FBetaScore.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.f_scores.FBetaScore.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.f_scores.FBetaScore.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.f_scores.FBetaScore.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.f_scores.FBetaScore.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.f_scores.FBetaScore.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.f_scores.FBetaScore.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.f_scores.FBetaScore.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.f_scores.FBetaScore.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.f_scores.FBetaScore.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.f_scores.FBetaScore.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.f_scores.FBetaScore.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.f_scores.FBetaScore.reset_states": "tfa.metrics.FBetaScore.reset_states",
+ "tfa.metrics.f_scores.FBetaScore.result": "tfa.metrics.FBetaScore.result",
+ "tfa.metrics.f_scores.FBetaScore.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.f_scores.FBetaScore.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.f_scores.FBetaScore.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.f_scores.FBetaScore.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.f_scores.FBetaScore.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.f_scores.FBetaScore.update_state": "tfa.metrics.FBetaScore.update_state",
+ "tfa.metrics.f_scores.FBetaScore.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.f_scores.FBetaScore.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.f_scores.FBetaScore.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.f_scores.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.f_scores.division": "tfa.activations.division",
+ "tfa.metrics.f_scores.print_function": "tfa.activations.print_function",
+ "tfa.metrics.hamming.HammingLoss": "tfa.metrics.HammingLoss",
+ "tfa.metrics.hamming.HammingLoss.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.hamming.HammingLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.hamming.HammingLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.hamming.HammingLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.hamming.HammingLoss.__init__": "tfa.metrics.HammingLoss.__init__",
+ "tfa.metrics.hamming.HammingLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.hamming.HammingLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.hamming.HammingLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.hamming.HammingLoss.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.hamming.HammingLoss.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.hamming.HammingLoss.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.hamming.HammingLoss.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.hamming.HammingLoss.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.hamming.HammingLoss.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.hamming.HammingLoss.build": "tfa.layers.GELU.build",
+ "tfa.metrics.hamming.HammingLoss.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.hamming.HammingLoss.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.hamming.HammingLoss.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.hamming.HammingLoss.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.hamming.HammingLoss.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.hamming.HammingLoss.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.hamming.HammingLoss.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.hamming.HammingLoss.get_config": "tfa.metrics.MeanMetricWrapper.get_config",
+ "tfa.metrics.hamming.HammingLoss.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.hamming.HammingLoss.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.hamming.HammingLoss.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.hamming.HammingLoss.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.hamming.HammingLoss.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.hamming.HammingLoss.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.hamming.HammingLoss.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.hamming.HammingLoss.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.hamming.HammingLoss.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.hamming.HammingLoss.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.hamming.HammingLoss.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.hamming.HammingLoss.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.hamming.HammingLoss.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.hamming.HammingLoss.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.hamming.HammingLoss.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.hamming.HammingLoss.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.hamming.HammingLoss.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.hamming.HammingLoss.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.hamming.HammingLoss.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.hamming.HammingLoss.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.hamming.HammingLoss.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.hamming.HammingLoss.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.hamming.HammingLoss.reset_states": "tfa.metrics.HammingLoss.reset_states",
+ "tfa.metrics.hamming.HammingLoss.result": "tfa.metrics.HammingLoss.result",
+ "tfa.metrics.hamming.HammingLoss.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.hamming.HammingLoss.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.hamming.HammingLoss.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.hamming.HammingLoss.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.hamming.HammingLoss.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.hamming.HammingLoss.update_state": "tfa.metrics.MeanMetricWrapper.update_state",
+ "tfa.metrics.hamming.HammingLoss.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.hamming.HammingLoss.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.hamming.HammingLoss.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.hamming.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.hamming.division": "tfa.activations.division",
+ "tfa.metrics.hamming.hamming_distance": "tfa.metrics.hamming_distance",
+ "tfa.metrics.hamming.print_function": "tfa.activations.print_function",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient": "tfa.metrics.MatthewsCorrelationCoefficient",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__init__": "tfa.metrics.MatthewsCorrelationCoefficient.__init__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.build": "tfa.layers.GELU.build",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_config": "tfa.metrics.MatthewsCorrelationCoefficient.get_config",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.reset_states": "tfa.metrics.MatthewsCorrelationCoefficient.reset_states",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.result": "tfa.metrics.MatthewsCorrelationCoefficient.result",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.update_state": "tfa.metrics.MatthewsCorrelationCoefficient.update_state",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.matthews_correlation_coefficient.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.matthews_correlation_coefficient.division": "tfa.activations.division",
+ "tfa.metrics.matthews_correlation_coefficient.print_function": "tfa.activations.print_function",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix": "tfa.metrics.MultiLabelConfusionMatrix",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__init__": "tfa.metrics.MultiLabelConfusionMatrix.__init__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.build": "tfa.layers.GELU.build",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_config": "tfa.metrics.MultiLabelConfusionMatrix.get_config",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.reset_states": "tfa.metrics.MultiLabelConfusionMatrix.reset_states",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.result": "tfa.metrics.MultiLabelConfusionMatrix.result",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.update_state": "tfa.metrics.MultiLabelConfusionMatrix.update_state",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.multilabel_confusion_matrix.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.multilabel_confusion_matrix.division": "tfa.activations.division",
+ "tfa.metrics.multilabel_confusion_matrix.print_function": "tfa.activations.print_function",
+ "tfa.metrics.print_function": "tfa.activations.print_function",
+ "tfa.metrics.r_square.RSquare": "tfa.metrics.RSquare",
+ "tfa.metrics.r_square.RSquare.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.r_square.RSquare.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.r_square.RSquare.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.r_square.RSquare.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.r_square.RSquare.__init__": "tfa.metrics.RSquare.__init__",
+ "tfa.metrics.r_square.RSquare.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.r_square.RSquare.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.r_square.RSquare.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.r_square.RSquare.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.r_square.RSquare.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.r_square.RSquare.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.r_square.RSquare.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.r_square.RSquare.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.r_square.RSquare.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.r_square.RSquare.build": "tfa.layers.GELU.build",
+ "tfa.metrics.r_square.RSquare.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.r_square.RSquare.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.r_square.RSquare.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.r_square.RSquare.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.r_square.RSquare.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.r_square.RSquare.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.r_square.RSquare.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.r_square.RSquare.get_config": "tfa.metrics.RSquare.get_config",
+ "tfa.metrics.r_square.RSquare.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.r_square.RSquare.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.r_square.RSquare.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.r_square.RSquare.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.r_square.RSquare.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.r_square.RSquare.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.r_square.RSquare.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.r_square.RSquare.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.r_square.RSquare.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.r_square.RSquare.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.r_square.RSquare.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.r_square.RSquare.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.r_square.RSquare.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.r_square.RSquare.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.r_square.RSquare.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.r_square.RSquare.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.r_square.RSquare.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.r_square.RSquare.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.r_square.RSquare.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.r_square.RSquare.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.r_square.RSquare.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.r_square.RSquare.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.r_square.RSquare.reset_states": "tfa.metrics.RSquare.reset_states",
+ "tfa.metrics.r_square.RSquare.result": "tfa.metrics.RSquare.result",
+ "tfa.metrics.r_square.RSquare.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.r_square.RSquare.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.r_square.RSquare.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.r_square.RSquare.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.r_square.RSquare.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.r_square.RSquare.update_state": "tfa.metrics.RSquare.update_state",
+ "tfa.metrics.r_square.RSquare.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.r_square.RSquare.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.r_square.RSquare.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.r_square.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.r_square.division": "tfa.activations.division",
+ "tfa.metrics.r_square.print_function": "tfa.activations.print_function",
+ "tfa.metrics.utils.MeanMetricWrapper": "tfa.metrics.MeanMetricWrapper",
+ "tfa.metrics.utils.MeanMetricWrapper.__call__": "tfa.metrics.CohenKappa.__call__",
+ "tfa.metrics.utils.MeanMetricWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.metrics.utils.MeanMetricWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.metrics.utils.MeanMetricWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.metrics.utils.MeanMetricWrapper.__init__": "tfa.metrics.MeanMetricWrapper.__init__",
+ "tfa.metrics.utils.MeanMetricWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.metrics.utils.MeanMetricWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.metrics.utils.MeanMetricWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.metrics.utils.MeanMetricWrapper.__new__": "tfa.metrics.CohenKappa.__new__",
+ "tfa.metrics.utils.MeanMetricWrapper.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.metrics.utils.MeanMetricWrapper.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.metrics.utils.MeanMetricWrapper.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.metrics.utils.MeanMetricWrapper.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.metrics.utils.MeanMetricWrapper.add_weight": "tfa.metrics.CohenKappa.add_weight",
+ "tfa.metrics.utils.MeanMetricWrapper.build": "tfa.layers.GELU.build",
+ "tfa.metrics.utils.MeanMetricWrapper.call": "tfa.metrics.CohenKappa.call",
+ "tfa.metrics.utils.MeanMetricWrapper.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.metrics.utils.MeanMetricWrapper.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.metrics.utils.MeanMetricWrapper.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.metrics.utils.MeanMetricWrapper.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.metrics.utils.MeanMetricWrapper.dtype": "tfa.metrics.CohenKappa.dtype",
+ "tfa.metrics.utils.MeanMetricWrapper.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.metrics.utils.MeanMetricWrapper.get_config": "tfa.metrics.MeanMetricWrapper.get_config",
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.metrics.utils.MeanMetricWrapper.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.metrics.utils.MeanMetricWrapper.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.metrics.utils.MeanMetricWrapper.input": "tfa.layers.CorrelationCost.input",
+ "tfa.metrics.utils.MeanMetricWrapper.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.metrics.utils.MeanMetricWrapper.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.metrics.utils.MeanMetricWrapper.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.metrics.utils.MeanMetricWrapper.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.metrics.utils.MeanMetricWrapper.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.metrics.utils.MeanMetricWrapper.name": "tfa.layers.CorrelationCost.name",
+ "tfa.metrics.utils.MeanMetricWrapper.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.metrics.utils.MeanMetricWrapper.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.metrics.utils.MeanMetricWrapper.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.metrics.utils.MeanMetricWrapper.output": "tfa.layers.CorrelationCost.output",
+ "tfa.metrics.utils.MeanMetricWrapper.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.metrics.utils.MeanMetricWrapper.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.metrics.utils.MeanMetricWrapper.reset_states": "tfa.metrics.HammingLoss.reset_states",
+ "tfa.metrics.utils.MeanMetricWrapper.result": "tfa.metrics.HammingLoss.result",
+ "tfa.metrics.utils.MeanMetricWrapper.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.metrics.utils.MeanMetricWrapper.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.metrics.utils.MeanMetricWrapper.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.metrics.utils.MeanMetricWrapper.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.metrics.utils.MeanMetricWrapper.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.metrics.utils.MeanMetricWrapper.update_state": "tfa.metrics.MeanMetricWrapper.update_state",
+ "tfa.metrics.utils.MeanMetricWrapper.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.metrics.utils.MeanMetricWrapper.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.metrics.utils.MeanMetricWrapper.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.metrics.utils.absolute_import": "tfa.activations.absolute_import",
+ "tfa.metrics.utils.division": "tfa.activations.division",
+ "tfa.metrics.utils.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.AdamW.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.AdamW.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.AdamW.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.AdamW.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.AdamW.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.AdamW.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.AdamW.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.AdamW.apply_gradients": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.apply_gradients",
+ "tfa.optimizers.AdamW.get_config": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.get_config",
+ "tfa.optimizers.AdamW.minimize": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.minimize",
+ "tfa.optimizers.AveragedOptimizerWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.AveragedOptimizerWrapper.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.AveragedOptimizerWrapper.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.AveragedOptimizerWrapper.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.AveragedOptimizerWrapper.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.AveragedOptimizerWrapper.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.AveragedOptimizerWrapper.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.AveragedOptimizerWrapper.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.AveragedOptimizerWrapper.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.AveragedOptimizerWrapper.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.AveragedOptimizerWrapper.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.ConditionalGradient.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.ConditionalGradient.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.ConditionalGradient.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.ConditionalGradient.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.ConditionalGradient.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.ConditionalGradient.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.ConditionalGradient.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.ConditionalGradient.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.ConditionalGradient.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.ConditionalGradient.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.ConditionalGradient.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.ConditionalGradient.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.ConditionalGradient.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.ConditionalGradient.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.ConditionalGradient.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.ConditionalGradient.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.ConditionalGradient.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.ConditionalGradient.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.ConditionalGradient.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.CyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.CyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.CyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.CyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.CyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.CyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.CyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.ExponentialCyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.LAMB.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.LAMB.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.LAMB.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.LAMB.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.LAMB.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.LAMB.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.LAMB.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.LAMB.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.LAMB.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.LAMB.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.LAMB.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.LAMB.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.LAMB.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.LAMB.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.LAMB.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.LAMB.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.LAMB.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.LAMB.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.LAMB.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.LAMB.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.LazyAdam.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.LazyAdam.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.LazyAdam.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.LazyAdam.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.LazyAdam.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.LazyAdam.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.LazyAdam.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.LazyAdam.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.LazyAdam.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.LazyAdam.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.LazyAdam.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.LazyAdam.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.LazyAdam.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.LazyAdam.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.LazyAdam.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.LazyAdam.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.LazyAdam.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.LazyAdam.set_weights": "tfa.optimizers.AdamW.set_weights",
+ "tfa.optimizers.LazyAdam.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.LazyAdam.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.Lookahead.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.Lookahead.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.Lookahead.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.Lookahead.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.Lookahead.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.Lookahead.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.Lookahead.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.Lookahead.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.Lookahead.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.Lookahead.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.Lookahead.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.Lookahead.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.Lookahead.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.Lookahead.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.Lookahead.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.Lookahead.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.Lookahead.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.Lookahead.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.MovingAverage.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.MovingAverage.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.MovingAverage.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.MovingAverage.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.MovingAverage.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.MovingAverage.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.MovingAverage.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.MovingAverage.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.MovingAverage.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.MovingAverage.apply_gradients": "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients",
+ "tfa.optimizers.MovingAverage.assign_average_vars": "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars",
+ "tfa.optimizers.MovingAverage.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.MovingAverage.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.MovingAverage.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.MovingAverage.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.MovingAverage.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.MovingAverage.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.MovingAverage.learning_rate": "tfa.optimizers.AveragedOptimizerWrapper.learning_rate",
+ "tfa.optimizers.MovingAverage.lr": "tfa.optimizers.AveragedOptimizerWrapper.lr",
+ "tfa.optimizers.MovingAverage.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.MovingAverage.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.MovingAverage.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.MovingAverage.weights": "tfa.optimizers.AveragedOptimizerWrapper.weights",
+ "tfa.optimizers.RectifiedAdam.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.RectifiedAdam.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.RectifiedAdam.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.RectifiedAdam.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.RectifiedAdam.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.RectifiedAdam.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.RectifiedAdam.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.RectifiedAdam.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.RectifiedAdam.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.RectifiedAdam.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.RectifiedAdam.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.RectifiedAdam.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.RectifiedAdam.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.RectifiedAdam.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.RectifiedAdam.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.RectifiedAdam.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.RectifiedAdam.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.RectifiedAdam.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.RectifiedAdam.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.SGDW.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.SGDW.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.SGDW.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.SGDW.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.SGDW.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.SGDW.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.SGDW.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.SGDW.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.SGDW.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.SGDW.apply_gradients": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.apply_gradients",
+ "tfa.optimizers.SGDW.get_config": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.get_config",
+ "tfa.optimizers.SGDW.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.SGDW.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.SGDW.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.SGDW.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.SGDW.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.SGDW.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.SGDW.minimize": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.minimize",
+ "tfa.optimizers.SGDW.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.SGDW.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.SGDW.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.SWA.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.SWA.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.SWA.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.SWA.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.SWA.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.SWA.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.SWA.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.SWA.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.SWA.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.SWA.apply_gradients": "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients",
+ "tfa.optimizers.SWA.assign_average_vars": "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars",
+ "tfa.optimizers.SWA.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.SWA.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.SWA.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.SWA.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.SWA.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.SWA.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.SWA.learning_rate": "tfa.optimizers.AveragedOptimizerWrapper.learning_rate",
+ "tfa.optimizers.SWA.lr": "tfa.optimizers.AveragedOptimizerWrapper.lr",
+ "tfa.optimizers.SWA.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.SWA.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.SWA.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.SWA.weights": "tfa.optimizers.AveragedOptimizerWrapper.weights",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.Triangular2CyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.TriangularCyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper": "tfa.optimizers.AveragedOptimizerWrapper",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__init__": "tfa.optimizers.AveragedOptimizerWrapper.__init__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.apply_gradients": "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.assign_average_vars": "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.average_op": "tfa.optimizers.AveragedOptimizerWrapper.average_op",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_config": "tfa.optimizers.AveragedOptimizerWrapper.get_config",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.learning_rate": "tfa.optimizers.AveragedOptimizerWrapper.learning_rate",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.lr": "tfa.optimizers.AveragedOptimizerWrapper.lr",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.weights": "tfa.optimizers.AveragedOptimizerWrapper.weights",
+ "tfa.optimizers.average_wrapper.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.average_wrapper.division": "tfa.activations.division",
+ "tfa.optimizers.average_wrapper.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient": "tfa.optimizers.ConditionalGradient",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__init__": "tfa.optimizers.ConditionalGradient.__init__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_config": "tfa.optimizers.ConditionalGradient.get_config",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.conditional_gradient.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.conditional_gradient.division": "tfa.activations.division",
+ "tfa.optimizers.conditional_gradient.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate": "tfa.optimizers.CyclicalLearningRate",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__init__": "tfa.optimizers.CyclicalLearningRate.__init__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate": "tfa.optimizers.ExponentialCyclicalLearningRate",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__init__": "tfa.optimizers.ExponentialCyclicalLearningRate.__init__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate": "tfa.optimizers.Triangular2CyclicalLearningRate",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__init__": "tfa.optimizers.Triangular2CyclicalLearningRate.__init__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate": "tfa.optimizers.TriangularCyclicalLearningRate",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__call__": "tfa.optimizers.CyclicalLearningRate.__call__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__init__": "tfa.optimizers.TriangularCyclicalLearningRate.__init__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.get_config": "tfa.optimizers.CyclicalLearningRate.get_config",
+ "tfa.optimizers.cyclical_learning_rate.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.cyclical_learning_rate.division": "tfa.activations.division",
+ "tfa.optimizers.cyclical_learning_rate.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.division": "tfa.activations.division",
+ "tfa.optimizers.lamb.LAMB": "tfa.optimizers.LAMB",
+ "tfa.optimizers.lamb.LAMB.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.lamb.LAMB.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.lamb.LAMB.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.lamb.LAMB.__init__": "tfa.optimizers.LAMB.__init__",
+ "tfa.optimizers.lamb.LAMB.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.lamb.LAMB.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.lamb.LAMB.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.lamb.LAMB.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.lamb.LAMB.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.lamb.LAMB.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.lamb.LAMB.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.lamb.LAMB.get_config": "tfa.optimizers.LAMB.get_config",
+ "tfa.optimizers.lamb.LAMB.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.lamb.LAMB.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.lamb.LAMB.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.lamb.LAMB.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.lamb.LAMB.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.lamb.LAMB.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.lamb.LAMB.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.lamb.LAMB.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.lamb.LAMB.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.lamb.LAMB.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.lamb.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.lamb.division": "tfa.activations.division",
+ "tfa.optimizers.lamb.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.lazy_adam.LazyAdam": "tfa.optimizers.LazyAdam",
+ "tfa.optimizers.lazy_adam.LazyAdam.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__init__": "tfa.optimizers.LazyAdam.__init__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.lazy_adam.LazyAdam.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.lazy_adam.LazyAdam.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.lazy_adam.LazyAdam.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.lazy_adam.LazyAdam.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_config": "tfa.optimizers.LazyAdam.get_config",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.lazy_adam.LazyAdam.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.lazy_adam.LazyAdam.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.lazy_adam.LazyAdam.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.lazy_adam.LazyAdam.set_weights": "tfa.optimizers.AdamW.set_weights",
+ "tfa.optimizers.lazy_adam.LazyAdam.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.lazy_adam.LazyAdam.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.lazy_adam.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.lazy_adam.division": "tfa.activations.division",
+ "tfa.optimizers.lazy_adam.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.lookahead.Lookahead": "tfa.optimizers.Lookahead",
+ "tfa.optimizers.lookahead.Lookahead.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.lookahead.Lookahead.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.lookahead.Lookahead.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.lookahead.Lookahead.__init__": "tfa.optimizers.Lookahead.__init__",
+ "tfa.optimizers.lookahead.Lookahead.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.lookahead.Lookahead.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.lookahead.Lookahead.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.lookahead.Lookahead.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.lookahead.Lookahead.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.lookahead.Lookahead.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.lookahead.Lookahead.apply_gradients": "tfa.optimizers.Lookahead.apply_gradients",
+ "tfa.optimizers.lookahead.Lookahead.get_config": "tfa.optimizers.Lookahead.get_config",
+ "tfa.optimizers.lookahead.Lookahead.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.lookahead.Lookahead.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.lookahead.Lookahead.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.lookahead.Lookahead.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.lookahead.Lookahead.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.lookahead.Lookahead.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.lookahead.Lookahead.learning_rate": "tfa.optimizers.Lookahead.learning_rate",
+ "tfa.optimizers.lookahead.Lookahead.lr": "tfa.optimizers.Lookahead.lr",
+ "tfa.optimizers.lookahead.Lookahead.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.lookahead.Lookahead.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.lookahead.Lookahead.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.lookahead.Lookahead.weights": "tfa.optimizers.Lookahead.weights",
+ "tfa.optimizers.lookahead.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.lookahead.division": "tfa.activations.division",
+ "tfa.optimizers.lookahead.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.moving_average.MovingAverage": "tfa.optimizers.MovingAverage",
+ "tfa.optimizers.moving_average.MovingAverage.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.moving_average.MovingAverage.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.moving_average.MovingAverage.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.moving_average.MovingAverage.__init__": "tfa.optimizers.MovingAverage.__init__",
+ "tfa.optimizers.moving_average.MovingAverage.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.moving_average.MovingAverage.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.moving_average.MovingAverage.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.moving_average.MovingAverage.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.moving_average.MovingAverage.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.moving_average.MovingAverage.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.moving_average.MovingAverage.apply_gradients": "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients",
+ "tfa.optimizers.moving_average.MovingAverage.assign_average_vars": "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars",
+ "tfa.optimizers.moving_average.MovingAverage.average_op": "tfa.optimizers.MovingAverage.average_op",
+ "tfa.optimizers.moving_average.MovingAverage.get_config": "tfa.optimizers.MovingAverage.get_config",
+ "tfa.optimizers.moving_average.MovingAverage.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.moving_average.MovingAverage.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.moving_average.MovingAverage.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.moving_average.MovingAverage.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.moving_average.MovingAverage.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.moving_average.MovingAverage.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.moving_average.MovingAverage.learning_rate": "tfa.optimizers.AveragedOptimizerWrapper.learning_rate",
+ "tfa.optimizers.moving_average.MovingAverage.lr": "tfa.optimizers.AveragedOptimizerWrapper.lr",
+ "tfa.optimizers.moving_average.MovingAverage.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.moving_average.MovingAverage.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.moving_average.MovingAverage.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.moving_average.MovingAverage.weights": "tfa.optimizers.AveragedOptimizerWrapper.weights",
+ "tfa.optimizers.moving_average.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.moving_average.division": "tfa.activations.division",
+ "tfa.optimizers.moving_average.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.rectified_adam.RectifiedAdam": "tfa.optimizers.RectifiedAdam",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__init__": "tfa.optimizers.RectifiedAdam.__init__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.apply_gradients": "tfa.optimizers.ConditionalGradient.apply_gradients",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_config": "tfa.optimizers.RectifiedAdam.get_config",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.set_weights": "tfa.optimizers.RectifiedAdam.set_weights",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.rectified_adam.RectifiedAdam.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.rectified_adam.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.rectified_adam.division": "tfa.activations.division",
+ "tfa.optimizers.rectified_adam.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.stochastic_weight_averaging.SWA": "tfa.optimizers.SWA",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__init__": "tfa.optimizers.SWA.__init__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.apply_gradients": "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.assign_average_vars": "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.average_op": "tfa.optimizers.SWA.average_op",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_config": "tfa.optimizers.SWA.get_config",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.learning_rate": "tfa.optimizers.AveragedOptimizerWrapper.learning_rate",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.lr": "tfa.optimizers.AveragedOptimizerWrapper.lr",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.minimize": "tfa.optimizers.AveragedOptimizerWrapper.minimize",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.stochastic_weight_averaging.SWA.weights": "tfa.optimizers.AveragedOptimizerWrapper.weights",
+ "tfa.optimizers.stochastic_weight_averaging.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.stochastic_weight_averaging.division": "tfa.activations.division",
+ "tfa.optimizers.stochastic_weight_averaging.print_function": "tfa.activations.print_function",
+ "tfa.optimizers.weight_decay_optimizers.AdamW": "tfa.optimizers.AdamW",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__init__": "tfa.optimizers.AdamW.__init__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.apply_gradients": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.apply_gradients",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_config": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.get_config",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.minimize": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.minimize",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.set_weights": "tfa.optimizers.AdamW.set_weights",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.weight_decay_optimizers.AdamW.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW": "tfa.optimizers.SGDW",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__init__": "tfa.optimizers.SGDW.__init__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.add_slot": "tfa.optimizers.AdamW.add_slot",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.add_weight": "tfa.optimizers.AdamW.add_weight",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.apply_gradients": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.apply_gradients",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_config": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.get_config",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_gradients": "tfa.optimizers.AdamW.get_gradients",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_slot": "tfa.optimizers.AdamW.get_slot",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_slot_names": "tfa.optimizers.AdamW.get_slot_names",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_updates": "tfa.optimizers.AdamW.get_updates",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_weights": "tfa.optimizers.AdamW.get_weights",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.iterations": "tfa.optimizers.AdamW.iterations",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.minimize": "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.minimize",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.set_weights": "tfa.optimizers.AveragedOptimizerWrapper.set_weights",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.variables": "tfa.optimizers.AdamW.variables",
+ "tfa.optimizers.weight_decay_optimizers.SGDW.weights": "tfa.optimizers.AdamW.weights",
+ "tfa.optimizers.weight_decay_optimizers.absolute_import": "tfa.activations.absolute_import",
+ "tfa.optimizers.weight_decay_optimizers.division": "tfa.activations.division",
+ "tfa.optimizers.weight_decay_optimizers.extend_with_decoupled_weight_decay": "tfa.optimizers.extend_with_decoupled_weight_decay",
+ "tfa.optimizers.weight_decay_optimizers.print_function": "tfa.activations.print_function",
+ "tfa.rnn.LayerNormLSTMCell.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.rnn.LayerNormLSTMCell.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.rnn.LayerNormLSTMCell.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.rnn.LayerNormLSTMCell.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.rnn.LayerNormLSTMCell.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.rnn.LayerNormLSTMCell.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.rnn.LayerNormLSTMCell.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.rnn.LayerNormLSTMCell.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.rnn.LayerNormLSTMCell.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.rnn.LayerNormLSTMCell.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.rnn.LayerNormLSTMCell.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.rnn.LayerNormLSTMCell.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.rnn.LayerNormLSTMCell.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.rnn.LayerNormLSTMCell.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.rnn.LayerNormLSTMCell.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.rnn.LayerNormLSTMCell.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.rnn.LayerNormLSTMCell.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.rnn.LayerNormLSTMCell.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.rnn.LayerNormLSTMCell.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.rnn.LayerNormLSTMCell.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.rnn.LayerNormLSTMCell.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.rnn.LayerNormLSTMCell.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.rnn.LayerNormLSTMCell.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.rnn.LayerNormLSTMCell.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.rnn.LayerNormLSTMCell.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.rnn.LayerNormLSTMCell.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.rnn.LayerNormLSTMCell.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.rnn.LayerNormLSTMCell.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.rnn.LayerNormLSTMCell.input": "tfa.layers.CorrelationCost.input",
+ "tfa.rnn.LayerNormLSTMCell.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.rnn.LayerNormLSTMCell.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.rnn.LayerNormLSTMCell.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.rnn.LayerNormLSTMCell.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.rnn.LayerNormLSTMCell.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.rnn.LayerNormLSTMCell.name": "tfa.layers.CorrelationCost.name",
+ "tfa.rnn.LayerNormLSTMCell.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.rnn.LayerNormLSTMCell.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.rnn.LayerNormLSTMCell.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.rnn.LayerNormLSTMCell.output": "tfa.layers.CorrelationCost.output",
+ "tfa.rnn.LayerNormLSTMCell.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.rnn.LayerNormLSTMCell.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.rnn.LayerNormLSTMCell.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.rnn.LayerNormLSTMCell.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.rnn.LayerNormLSTMCell.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.rnn.LayerNormLSTMCell.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.rnn.LayerNormLSTMCell.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.rnn.LayerNormLSTMCell.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.rnn.LayerNormLSTMCell.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.rnn.LayerNormLSTMCell.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.rnn.NASCell.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.rnn.NASCell.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.rnn.NASCell.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.rnn.NASCell.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.rnn.NASCell.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.rnn.NASCell.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.rnn.NASCell.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.rnn.NASCell.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.rnn.NASCell.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.rnn.NASCell.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.rnn.NASCell.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.rnn.NASCell.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.rnn.NASCell.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.rnn.NASCell.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.rnn.NASCell.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.rnn.NASCell.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.rnn.NASCell.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.rnn.NASCell.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.rnn.NASCell.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.rnn.NASCell.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.rnn.NASCell.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.rnn.NASCell.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.rnn.NASCell.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.rnn.NASCell.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.rnn.NASCell.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.rnn.NASCell.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.rnn.NASCell.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.rnn.NASCell.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.rnn.NASCell.input": "tfa.layers.CorrelationCost.input",
+ "tfa.rnn.NASCell.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.rnn.NASCell.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.rnn.NASCell.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.rnn.NASCell.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.rnn.NASCell.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.rnn.NASCell.name": "tfa.layers.CorrelationCost.name",
+ "tfa.rnn.NASCell.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.rnn.NASCell.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.rnn.NASCell.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.rnn.NASCell.output": "tfa.layers.CorrelationCost.output",
+ "tfa.rnn.NASCell.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.rnn.NASCell.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.rnn.NASCell.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.rnn.NASCell.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.rnn.NASCell.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.rnn.NASCell.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.rnn.NASCell.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.rnn.NASCell.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.rnn.NASCell.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.rnn.NASCell.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.rnn.absolute_import": "tfa.activations.absolute_import",
+ "tfa.rnn.cell.LayerNormLSTMCell": "tfa.rnn.LayerNormLSTMCell",
+ "tfa.rnn.cell.LayerNormLSTMCell.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__init__": "tfa.rnn.LayerNormLSTMCell.__init__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.rnn.cell.LayerNormLSTMCell.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.rnn.cell.LayerNormLSTMCell.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.rnn.cell.LayerNormLSTMCell.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.rnn.cell.LayerNormLSTMCell.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.rnn.cell.LayerNormLSTMCell.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.rnn.cell.LayerNormLSTMCell.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.rnn.cell.LayerNormLSTMCell.build": "tfa.rnn.LayerNormLSTMCell.build",
+ "tfa.rnn.cell.LayerNormLSTMCell.call": "tfa.rnn.LayerNormLSTMCell.call",
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.rnn.cell.LayerNormLSTMCell.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.rnn.cell.LayerNormLSTMCell.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.rnn.cell.LayerNormLSTMCell.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_config": "tfa.rnn.LayerNormLSTMCell.get_config",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_dropout_mask_for_cell": "tfa.rnn.LayerNormLSTMCell.get_dropout_mask_for_cell",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_initial_state": "tfa.rnn.LayerNormLSTMCell.get_initial_state",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_recurrent_dropout_mask_for_cell": "tfa.rnn.LayerNormLSTMCell.get_recurrent_dropout_mask_for_cell",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.rnn.cell.LayerNormLSTMCell.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.rnn.cell.LayerNormLSTMCell.input": "tfa.layers.CorrelationCost.input",
+ "tfa.rnn.cell.LayerNormLSTMCell.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.rnn.cell.LayerNormLSTMCell.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.rnn.cell.LayerNormLSTMCell.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.rnn.cell.LayerNormLSTMCell.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.rnn.cell.LayerNormLSTMCell.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.rnn.cell.LayerNormLSTMCell.name": "tfa.layers.CorrelationCost.name",
+ "tfa.rnn.cell.LayerNormLSTMCell.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.rnn.cell.LayerNormLSTMCell.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.rnn.cell.LayerNormLSTMCell.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.rnn.cell.LayerNormLSTMCell.output": "tfa.layers.CorrelationCost.output",
+ "tfa.rnn.cell.LayerNormLSTMCell.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.rnn.cell.LayerNormLSTMCell.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.rnn.cell.LayerNormLSTMCell.reset_dropout_mask": "tfa.rnn.LayerNormLSTMCell.reset_dropout_mask",
+ "tfa.rnn.cell.LayerNormLSTMCell.reset_recurrent_dropout_mask": "tfa.rnn.LayerNormLSTMCell.reset_recurrent_dropout_mask",
+ "tfa.rnn.cell.LayerNormLSTMCell.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.rnn.cell.LayerNormLSTMCell.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.rnn.cell.LayerNormLSTMCell.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.rnn.cell.LayerNormLSTMCell.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.rnn.cell.LayerNormLSTMCell.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.rnn.cell.NASCell": "tfa.rnn.NASCell",
+ "tfa.rnn.cell.NASCell.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.rnn.cell.NASCell.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.rnn.cell.NASCell.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.rnn.cell.NASCell.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.rnn.cell.NASCell.__init__": "tfa.rnn.NASCell.__init__",
+ "tfa.rnn.cell.NASCell.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.rnn.cell.NASCell.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.rnn.cell.NASCell.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.rnn.cell.NASCell.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.rnn.cell.NASCell.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.rnn.cell.NASCell.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.rnn.cell.NASCell.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.rnn.cell.NASCell.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.rnn.cell.NASCell.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.rnn.cell.NASCell.build": "tfa.rnn.NASCell.build",
+ "tfa.rnn.cell.NASCell.call": "tfa.rnn.NASCell.call",
+ "tfa.rnn.cell.NASCell.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.rnn.cell.NASCell.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.rnn.cell.NASCell.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.rnn.cell.NASCell.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.rnn.cell.NASCell.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.rnn.cell.NASCell.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.rnn.cell.NASCell.get_config": "tfa.rnn.NASCell.get_config",
+ "tfa.rnn.cell.NASCell.get_initial_state": "tfa.rnn.NASCell.get_initial_state",
+ "tfa.rnn.cell.NASCell.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.rnn.cell.NASCell.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.rnn.cell.NASCell.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.rnn.cell.NASCell.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.rnn.cell.NASCell.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.rnn.cell.NASCell.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.rnn.cell.NASCell.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.rnn.cell.NASCell.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.rnn.cell.NASCell.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.rnn.cell.NASCell.input": "tfa.layers.CorrelationCost.input",
+ "tfa.rnn.cell.NASCell.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.rnn.cell.NASCell.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.rnn.cell.NASCell.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.rnn.cell.NASCell.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.rnn.cell.NASCell.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.rnn.cell.NASCell.name": "tfa.layers.CorrelationCost.name",
+ "tfa.rnn.cell.NASCell.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.rnn.cell.NASCell.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.rnn.cell.NASCell.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.rnn.cell.NASCell.output": "tfa.layers.CorrelationCost.output",
+ "tfa.rnn.cell.NASCell.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.rnn.cell.NASCell.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.rnn.cell.NASCell.output_size": "tfa.rnn.NASCell.output_size",
+ "tfa.rnn.cell.NASCell.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.rnn.cell.NASCell.state_size": "tfa.rnn.NASCell.state_size",
+ "tfa.rnn.cell.NASCell.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.rnn.cell.NASCell.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.rnn.cell.NASCell.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.rnn.cell.NASCell.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.rnn.cell.NASCell.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.rnn.cell.NASCell.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.rnn.cell.NASCell.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.rnn.cell.absolute_import": "tfa.activations.absolute_import",
+ "tfa.rnn.cell.division": "tfa.activations.division",
+ "tfa.rnn.cell.print_function": "tfa.activations.print_function",
+ "tfa.rnn.division": "tfa.activations.division",
+ "tfa.rnn.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.AttentionMechanism.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.AttentionMechanism.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.AttentionMechanism.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.AttentionMechanism.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.AttentionMechanism.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.AttentionMechanism.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.AttentionMechanism.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.AttentionWrapper.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.AttentionWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.AttentionWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.AttentionWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.AttentionWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.AttentionWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.AttentionWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.AttentionWrapper.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.AttentionWrapper.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.AttentionWrapper.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.AttentionWrapper.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.AttentionWrapper.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.AttentionWrapper.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.AttentionWrapper.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.AttentionWrapper.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.AttentionWrapper.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.AttentionWrapper.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.AttentionWrapper.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.AttentionWrapper.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.AttentionWrapper.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.AttentionWrapper.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.AttentionWrapper.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.AttentionWrapper.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.AttentionWrapper.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.AttentionWrapper.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.AttentionWrapper.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.AttentionWrapper.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.AttentionWrapper.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.AttentionWrapper.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.AttentionWrapper.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.AttentionWrapper.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.AttentionWrapper.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.AttentionWrapper.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.AttentionWrapper.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.AttentionWrapper.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.AttentionWrapper.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.AttentionWrapper.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.AttentionWrapper.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.AttentionWrapper.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.AttentionWrapper.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.AttentionWrapper.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.AttentionWrapper.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.AttentionWrapper.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.AttentionWrapper.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.AttentionWrapper.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.AttentionWrapper.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.AttentionWrapper.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.AttentionWrapper.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.AttentionWrapper.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.AttentionWrapper.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.AttentionWrapperState.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.BahdanauAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.BahdanauAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.BahdanauAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.BahdanauAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.BahdanauAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.BahdanauAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.BahdanauAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.BahdanauAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.BahdanauAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.BahdanauAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.BahdanauAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.BahdanauAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.BahdanauAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.BahdanauAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.BahdanauAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.BahdanauAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.BahdanauAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.BahdanauAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.BahdanauAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.BahdanauAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.BahdanauAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.BahdanauAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.BahdanauAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.BahdanauAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.BahdanauAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.BahdanauAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.BahdanauAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.BahdanauAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.BahdanauAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.BahdanauAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.BahdanauAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.BahdanauAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.BahdanauAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.BahdanauAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.BahdanauAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.BahdanauAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.BahdanauAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.BahdanauAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.BahdanauAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.BahdanauAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.BahdanauAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.BahdanauAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.BahdanauAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.BahdanauAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.BahdanauAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.BahdanauAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.BahdanauAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.BahdanauMonotonicAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.BahdanauMonotonicAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.BahdanauMonotonicAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.BahdanauMonotonicAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.BahdanauMonotonicAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.BahdanauMonotonicAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.BahdanauMonotonicAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.BahdanauMonotonicAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.BahdanauMonotonicAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.BahdanauMonotonicAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.BahdanauMonotonicAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.BahdanauMonotonicAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.BahdanauMonotonicAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.BahdanauMonotonicAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.BahdanauMonotonicAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.BahdanauMonotonicAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.BahdanauMonotonicAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.BahdanauMonotonicAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.BahdanauMonotonicAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.BahdanauMonotonicAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.BahdanauMonotonicAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.BahdanauMonotonicAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.BahdanauMonotonicAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.BahdanauMonotonicAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.BahdanauMonotonicAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.BaseDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.BaseDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.BaseDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.BaseDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.BaseDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.BaseDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.BaseDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.BaseDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.BaseDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.BaseDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.BaseDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.BaseDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.BaseDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.BaseDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.BaseDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.BaseDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.BaseDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.BaseDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.BaseDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.BaseDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.BaseDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.BaseDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.BaseDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.BaseDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.BaseDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.BaseDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.BaseDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.BaseDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.BaseDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.BaseDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.BaseDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.BaseDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.BaseDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.BaseDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.BaseDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.BaseDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.BaseDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.BaseDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.BaseDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.BaseDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.BaseDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.BaseDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.BaseDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.BaseDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.BaseDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.BaseDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.BaseDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.BaseDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.BaseDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.BaseDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.BaseDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.BasicDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.BasicDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.BasicDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.BasicDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.BasicDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.BasicDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.BasicDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.BasicDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.BasicDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.BasicDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.BasicDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.BasicDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.BasicDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.BasicDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.BasicDecoder.call": "tfa.seq2seq.BaseDecoder.call",
+ "tfa.seq2seq.BasicDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.BasicDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.BasicDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.BasicDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.BasicDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.BasicDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.BasicDecoder.finalize": "tfa.seq2seq.BaseDecoder.finalize",
+ "tfa.seq2seq.BasicDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.BasicDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.BasicDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.BasicDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.BasicDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.BasicDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.BasicDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.BasicDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.BasicDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.BasicDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.BasicDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.BasicDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.BasicDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.BasicDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.BasicDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.BasicDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.BasicDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.BasicDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.BasicDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.BasicDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.BasicDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.BasicDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.BasicDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.BasicDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.BasicDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.BasicDecoder.tracks_own_finished": "tfa.seq2seq.BaseDecoder.tracks_own_finished",
+ "tfa.seq2seq.BasicDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.BasicDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.BasicDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.BasicDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.BasicDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.BasicDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.BasicDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.BasicDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.BasicDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.BasicDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.BasicDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.BasicDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.BasicDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.BasicDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.BasicDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.BasicDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.BasicDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.BasicDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.BasicDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.BasicDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.BasicDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.BasicDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.BeamSearchDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.BeamSearchDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.BeamSearchDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.BeamSearchDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.BeamSearchDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.BeamSearchDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.BeamSearchDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.BeamSearchDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.BeamSearchDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.BeamSearchDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.BeamSearchDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.BeamSearchDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.BeamSearchDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.BeamSearchDecoder.batch_size": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.batch_size",
+ "tfa.seq2seq.BeamSearchDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.BeamSearchDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.BeamSearchDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.BeamSearchDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.BeamSearchDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.BeamSearchDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.BeamSearchDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.BeamSearchDecoder.finalize": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.finalize",
+ "tfa.seq2seq.BeamSearchDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.BeamSearchDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.BeamSearchDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.BeamSearchDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.BeamSearchDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.BeamSearchDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.BeamSearchDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.BeamSearchDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.BeamSearchDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.BeamSearchDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.BeamSearchDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.BeamSearchDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.BeamSearchDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.BeamSearchDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.BeamSearchDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.BeamSearchDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.BeamSearchDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.BeamSearchDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.BeamSearchDecoder.output_size": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.output_size",
+ "tfa.seq2seq.BeamSearchDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.BeamSearchDecoder.step": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.step",
+ "tfa.seq2seq.BeamSearchDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.BeamSearchDecoder.tracks_own_finished": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.tracks_own_finished",
+ "tfa.seq2seq.BeamSearchDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.BeamSearchDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.BeamSearchDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.BeamSearchDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.BeamSearchDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.BeamSearchDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.BeamSearchDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.BeamSearchDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.BeamSearchDecoderState.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.BeamSearchDecoderState.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.BeamSearchDecoderState.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.BeamSearchDecoderState.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.BeamSearchDecoderState.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.BeamSearchDecoderState.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.BeamSearchDecoderState.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.BeamSearchDecoderState.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.BeamSearchDecoderState.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.BeamSearchDecoderState.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.BeamSearchDecoderState.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.BeamSearchDecoderState.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.BeamSearchDecoderState.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.BeamSearchDecoderState.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.BeamSearchDecoderState.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.BeamSearchDecoderState.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.CustomSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.CustomSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.CustomSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.CustomSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.CustomSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.CustomSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.CustomSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.Decoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.Decoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.Decoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.Decoder.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.Decoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.Decoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.Decoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.Decoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.GreedyEmbeddingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.InferenceSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.InferenceSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.InferenceSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.InferenceSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.InferenceSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.InferenceSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.InferenceSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.LuongAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.LuongAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.LuongAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.LuongAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.LuongAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.LuongAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.LuongAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.LuongAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.LuongAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.LuongAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.LuongAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.LuongAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.LuongAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.LuongAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.LuongAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.LuongAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.LuongAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.LuongAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.LuongAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.LuongAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.LuongAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.LuongAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.LuongAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.LuongAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.LuongAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.LuongAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.LuongAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.LuongAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.LuongAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.LuongAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.LuongAttention.initial_alignments": "tfa.seq2seq.BahdanauAttention.initial_alignments",
+ "tfa.seq2seq.LuongAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.LuongAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.LuongAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.LuongAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.LuongAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.LuongAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.LuongAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.LuongAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.LuongAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.LuongAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.LuongAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.LuongAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.LuongAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.LuongAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.LuongAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.LuongAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.LuongAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.LuongAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.LuongAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.LuongAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.LuongAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.LuongAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.LuongAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.LuongAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.LuongAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.LuongMonotonicAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.LuongMonotonicAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.LuongMonotonicAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.LuongMonotonicAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.LuongMonotonicAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.LuongMonotonicAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.LuongMonotonicAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.LuongMonotonicAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.LuongMonotonicAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.LuongMonotonicAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.LuongMonotonicAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.LuongMonotonicAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.LuongMonotonicAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.LuongMonotonicAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.LuongMonotonicAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.LuongMonotonicAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.LuongMonotonicAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.LuongMonotonicAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.LuongMonotonicAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.LuongMonotonicAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.LuongMonotonicAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.LuongMonotonicAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.LuongMonotonicAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.LuongMonotonicAttention.initial_alignments": "tfa.seq2seq.BahdanauMonotonicAttention.initial_alignments",
+ "tfa.seq2seq.LuongMonotonicAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.LuongMonotonicAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.LuongMonotonicAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.LuongMonotonicAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.LuongMonotonicAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.LuongMonotonicAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.LuongMonotonicAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.LuongMonotonicAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.LuongMonotonicAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.LuongMonotonicAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.LuongMonotonicAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.LuongMonotonicAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.LuongMonotonicAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.LuongMonotonicAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.LuongMonotonicAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.LuongMonotonicAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.LuongMonotonicAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.LuongMonotonicAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.LuongMonotonicAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.LuongMonotonicAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.LuongMonotonicAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.LuongMonotonicAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.LuongMonotonicAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.LuongMonotonicAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.LuongMonotonicAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.SampleEmbeddingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.SampleEmbeddingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.SampleEmbeddingSampler.batch_size": "tfa.seq2seq.GreedyEmbeddingSampler.batch_size",
+ "tfa.seq2seq.SampleEmbeddingSampler.initialize": "tfa.seq2seq.GreedyEmbeddingSampler.initialize",
+ "tfa.seq2seq.SampleEmbeddingSampler.next_inputs": "tfa.seq2seq.GreedyEmbeddingSampler.next_inputs",
+ "tfa.seq2seq.SampleEmbeddingSampler.sample_ids_dtype": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_dtype",
+ "tfa.seq2seq.SampleEmbeddingSampler.sample_ids_shape": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_shape",
+ "tfa.seq2seq.Sampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.Sampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.Sampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.Sampler.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.Sampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.Sampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.Sampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.Sampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.batch_size": "tfa.seq2seq.TrainingSampler.batch_size",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample_ids_dtype": "tfa.seq2seq.TrainingSampler.sample_ids_dtype",
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample_ids_shape": "tfa.seq2seq.TrainingSampler.sample_ids_shape",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.batch_size": "tfa.seq2seq.TrainingSampler.batch_size",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.sample_ids_dtype": "tfa.seq2seq.TrainingSampler.sample_ids_dtype",
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.sample_ids_shape": "tfa.seq2seq.TrainingSampler.sample_ids_shape",
+ "tfa.seq2seq.SequenceLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.SequenceLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.SequenceLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.SequenceLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.SequenceLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.SequenceLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.SequenceLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.SequenceLoss.get_config": "tfa.losses.NpairsLoss.get_config",
+ "tfa.seq2seq.TrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.TrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.TrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.TrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.TrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.TrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.TrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism": "tfa.seq2seq.AttentionMechanism",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.alignments_size": "tfa.seq2seq.AttentionMechanism.alignments_size",
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.state_size": "tfa.seq2seq.AttentionMechanism.state_size",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper": "tfa.seq2seq.AttentionWrapper",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__init__": "tfa.seq2seq.AttentionWrapper.__init__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.call": "tfa.seq2seq.AttentionWrapper.call",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_initial_state": "tfa.seq2seq.AttentionWrapper.get_initial_state",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_size": "tfa.seq2seq.AttentionWrapper.output_size",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.state_size": "tfa.seq2seq.AttentionWrapper.state_size",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState": "tfa.seq2seq.AttentionWrapperState",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__new__": "tfa.seq2seq.AttentionWrapperState.__new__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.alignment_history": "tfa.seq2seq.AttentionWrapperState.alignment_history",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.alignments": "tfa.seq2seq.AttentionWrapperState.alignments",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.attention": "tfa.seq2seq.AttentionWrapperState.attention",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.attention_state": "tfa.seq2seq.AttentionWrapperState.attention_state",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.cell_state": "tfa.seq2seq.AttentionWrapperState.cell_state",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.clone": "tfa.seq2seq.AttentionWrapperState.clone",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.time": "tfa.seq2seq.AttentionWrapperState.time",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention": "tfa.seq2seq.BahdanauAttention",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__init__": "tfa.seq2seq.BahdanauAttention.__init__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.build": "tfa.seq2seq.BahdanauAttention.build",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_config": "tfa.seq2seq.BahdanauAttention.get_config",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.initial_alignments": "tfa.seq2seq.BahdanauAttention.initial_alignments",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention": "tfa.seq2seq.BahdanauMonotonicAttention",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__init__": "tfa.seq2seq.BahdanauMonotonicAttention.__init__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.build": "tfa.seq2seq.BahdanauMonotonicAttention.build",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_config": "tfa.seq2seq.BahdanauMonotonicAttention.get_config",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.initial_alignments": "tfa.seq2seq.BahdanauMonotonicAttention.initial_alignments",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.attention_wrapper.LuongAttention": "tfa.seq2seq.LuongAttention",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__init__": "tfa.seq2seq.LuongAttention.__init__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.build": "tfa.seq2seq.LuongAttention.build",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_config": "tfa.seq2seq.LuongAttention.get_config",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.initial_alignments": "tfa.seq2seq.BahdanauAttention.initial_alignments",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.attention_wrapper.LuongAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention": "tfa.seq2seq.LuongMonotonicAttention",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__call__": "tfa.seq2seq.BahdanauAttention.__call__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__init__": "tfa.seq2seq.LuongMonotonicAttention.__init__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.alignments_size": "tfa.seq2seq.BahdanauAttention.alignments_size",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.build": "tfa.seq2seq.LuongMonotonicAttention.build",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.call": "tfa.seq2seq.BahdanauAttention.call",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_mask": "tfa.seq2seq.BahdanauAttention.compute_mask",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_config": "tfa.seq2seq.LuongMonotonicAttention.get_config",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.initial_alignments": "tfa.seq2seq.BahdanauMonotonicAttention.initial_alignments",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.initial_state": "tfa.seq2seq.BahdanauAttention.initial_state",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.memory_initialized": "tfa.seq2seq.BahdanauAttention.memory_initialized",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.setup_memory": "tfa.seq2seq.BahdanauAttention.setup_memory",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.state_size": "tfa.seq2seq.BahdanauAttention.state_size",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.attention_wrapper.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.attention_wrapper.division": "tfa.activations.division",
+ "tfa.seq2seq.attention_wrapper.hardmax": "tfa.seq2seq.hardmax",
+ "tfa.seq2seq.attention_wrapper.monotonic_attention": "tfa.seq2seq.monotonic_attention",
+ "tfa.seq2seq.attention_wrapper.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.attention_wrapper.safe_cumprod": "tfa.seq2seq.safe_cumprod",
+ "tfa.seq2seq.basic_decoder.BasicDecoder": "tfa.seq2seq.BasicDecoder",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__init__": "tfa.seq2seq.BasicDecoder.__init__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.batch_size": "tfa.seq2seq.BasicDecoder.batch_size",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.call": "tfa.seq2seq.BaseDecoder.call",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.finalize": "tfa.seq2seq.BaseDecoder.finalize",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.initialize": "tfa.seq2seq.BasicDecoder.initialize",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_dtype": "tfa.seq2seq.BasicDecoder.output_dtype",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_size": "tfa.seq2seq.BasicDecoder.output_size",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.step": "tfa.seq2seq.BasicDecoder.step",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.tracks_own_finished": "tfa.seq2seq.BaseDecoder.tracks_own_finished",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.basic_decoder.BasicDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput": "tfa.seq2seq.BasicDecoderOutput",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__new__": "tfa.seq2seq.BasicDecoderOutput.__new__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.rnn_output": "tfa.seq2seq.BasicDecoderOutput.rnn_output",
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.sample_id": "tfa.seq2seq.BasicDecoderOutput.sample_id",
+ "tfa.seq2seq.basic_decoder.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.basic_decoder.division": "tfa.activations.division",
+ "tfa.seq2seq.basic_decoder.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder": "tfa.seq2seq.BeamSearchDecoder",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__init__": "tfa.seq2seq.BeamSearchDecoder.__init__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.batch_size": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.batch_size",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.call": "tfa.seq2seq.BeamSearchDecoder.call",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.finalize": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.finalize",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.initialize": "tfa.seq2seq.BeamSearchDecoder.initialize",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_dtype": "tfa.seq2seq.BeamSearchDecoder.output_dtype",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_size": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.output_size",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.step": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.step",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.tracks_own_finished": "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.tracks_own_finished",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput": "tfa.seq2seq.BeamSearchDecoderOutput",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__new__": "tfa.seq2seq.BeamSearchDecoderOutput.__new__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.parent_ids": "tfa.seq2seq.BeamSearchDecoderOutput.parent_ids",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.predicted_ids": "tfa.seq2seq.BeamSearchDecoderOutput.predicted_ids",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.scores": "tfa.seq2seq.BeamSearchDecoderOutput.scores",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState": "tfa.seq2seq.BeamSearchDecoderState",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__new__": "tfa.seq2seq.BeamSearchDecoderState.__new__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.accumulated_attention_probs": "tfa.seq2seq.BeamSearchDecoderState.accumulated_attention_probs",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.cell_state": "tfa.seq2seq.BeamSearchDecoderState.cell_state",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.finished": "tfa.seq2seq.BeamSearchDecoderState.finished",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.lengths": "tfa.seq2seq.BeamSearchDecoderState.lengths",
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.log_probs": "tfa.seq2seq.BeamSearchDecoderState.log_probs",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput": "tfa.seq2seq.FinalBeamSearchDecoderOutput",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__add__": "tfa.seq2seq.AttentionWrapperState.__add__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__contains__": "tfa.seq2seq.AttentionWrapperState.__contains__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__eq__": "tfa.seq2seq.AttentionWrapperState.__eq__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__ge__": "tfa.seq2seq.AttentionWrapperState.__ge__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__getitem__": "tfa.seq2seq.AttentionWrapperState.__getitem__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__gt__": "tfa.seq2seq.AttentionWrapperState.__gt__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__iter__": "tfa.seq2seq.AttentionWrapperState.__iter__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__le__": "tfa.seq2seq.AttentionWrapperState.__le__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__len__": "tfa.seq2seq.AttentionWrapperState.__len__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__lt__": "tfa.seq2seq.AttentionWrapperState.__lt__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__mul__": "tfa.seq2seq.AttentionWrapperState.__mul__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__ne__": "tfa.seq2seq.AttentionWrapperState.__ne__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__new__": "tfa.seq2seq.FinalBeamSearchDecoderOutput.__new__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__rmul__": "tfa.seq2seq.AttentionWrapperState.__rmul__",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.beam_search_decoder_output": "tfa.seq2seq.FinalBeamSearchDecoderOutput.beam_search_decoder_output",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.count": "tfa.seq2seq.AttentionWrapperState.count",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.index": "tfa.seq2seq.AttentionWrapperState.index",
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.predicted_ids": "tfa.seq2seq.FinalBeamSearchDecoderOutput.predicted_ids",
+ "tfa.seq2seq.beam_search_decoder.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.beam_search_decoder.division": "tfa.activations.division",
+ "tfa.seq2seq.beam_search_decoder.gather_tree_from_array": "tfa.seq2seq.gather_tree_from_array",
+ "tfa.seq2seq.beam_search_decoder.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.beam_search_decoder.tile_batch": "tfa.seq2seq.tile_batch",
+ "tfa.seq2seq.decoder.BaseDecoder": "tfa.seq2seq.BaseDecoder",
+ "tfa.seq2seq.decoder.BaseDecoder.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.seq2seq.decoder.BaseDecoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.decoder.BaseDecoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.decoder.BaseDecoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.decoder.BaseDecoder.__init__": "tfa.seq2seq.BaseDecoder.__init__",
+ "tfa.seq2seq.decoder.BaseDecoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.decoder.BaseDecoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.decoder.BaseDecoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.decoder.BaseDecoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.decoder.BaseDecoder.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.seq2seq.decoder.BaseDecoder.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.seq2seq.decoder.BaseDecoder.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.seq2seq.decoder.BaseDecoder.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.seq2seq.decoder.BaseDecoder.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.seq2seq.decoder.BaseDecoder.batch_size": "tfa.seq2seq.BaseDecoder.batch_size",
+ "tfa.seq2seq.decoder.BaseDecoder.build": "tfa.layers.GELU.build",
+ "tfa.seq2seq.decoder.BaseDecoder.call": "tfa.seq2seq.BaseDecoder.call",
+ "tfa.seq2seq.decoder.BaseDecoder.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.seq2seq.decoder.BaseDecoder.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.seq2seq.decoder.BaseDecoder.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.seq2seq.decoder.BaseDecoder.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.seq2seq.decoder.BaseDecoder.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.seq2seq.decoder.BaseDecoder.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.seq2seq.decoder.BaseDecoder.finalize": "tfa.seq2seq.BaseDecoder.finalize",
+ "tfa.seq2seq.decoder.BaseDecoder.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.seq2seq.decoder.BaseDecoder.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.seq2seq.decoder.BaseDecoder.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.seq2seq.decoder.BaseDecoder.initialize": "tfa.seq2seq.BaseDecoder.initialize",
+ "tfa.seq2seq.decoder.BaseDecoder.input": "tfa.layers.CorrelationCost.input",
+ "tfa.seq2seq.decoder.BaseDecoder.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.seq2seq.decoder.BaseDecoder.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.seq2seq.decoder.BaseDecoder.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.seq2seq.decoder.BaseDecoder.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.seq2seq.decoder.BaseDecoder.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.seq2seq.decoder.BaseDecoder.name": "tfa.layers.CorrelationCost.name",
+ "tfa.seq2seq.decoder.BaseDecoder.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.seq2seq.decoder.BaseDecoder.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.seq2seq.decoder.BaseDecoder.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.seq2seq.decoder.BaseDecoder.output": "tfa.layers.CorrelationCost.output",
+ "tfa.seq2seq.decoder.BaseDecoder.output_dtype": "tfa.seq2seq.BaseDecoder.output_dtype",
+ "tfa.seq2seq.decoder.BaseDecoder.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.seq2seq.decoder.BaseDecoder.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.seq2seq.decoder.BaseDecoder.output_size": "tfa.seq2seq.BaseDecoder.output_size",
+ "tfa.seq2seq.decoder.BaseDecoder.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.seq2seq.decoder.BaseDecoder.step": "tfa.seq2seq.BaseDecoder.step",
+ "tfa.seq2seq.decoder.BaseDecoder.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.seq2seq.decoder.BaseDecoder.tracks_own_finished": "tfa.seq2seq.BaseDecoder.tracks_own_finished",
+ "tfa.seq2seq.decoder.BaseDecoder.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.seq2seq.decoder.BaseDecoder.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.seq2seq.decoder.BaseDecoder.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.seq2seq.decoder.BaseDecoder.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.seq2seq.decoder.BaseDecoder.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.seq2seq.decoder.BaseDecoder.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.seq2seq.decoder.Decoder": "tfa.seq2seq.Decoder",
+ "tfa.seq2seq.decoder.Decoder.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.decoder.Decoder.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.decoder.Decoder.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.decoder.Decoder.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.decoder.Decoder.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.decoder.Decoder.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.decoder.Decoder.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.decoder.Decoder.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.decoder.Decoder.batch_size": "tfa.seq2seq.Decoder.batch_size",
+ "tfa.seq2seq.decoder.Decoder.finalize": "tfa.seq2seq.Decoder.finalize",
+ "tfa.seq2seq.decoder.Decoder.initialize": "tfa.seq2seq.Decoder.initialize",
+ "tfa.seq2seq.decoder.Decoder.output_dtype": "tfa.seq2seq.Decoder.output_dtype",
+ "tfa.seq2seq.decoder.Decoder.output_size": "tfa.seq2seq.Decoder.output_size",
+ "tfa.seq2seq.decoder.Decoder.step": "tfa.seq2seq.Decoder.step",
+ "tfa.seq2seq.decoder.Decoder.tracks_own_finished": "tfa.seq2seq.Decoder.tracks_own_finished",
+ "tfa.seq2seq.decoder.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.decoder.division": "tfa.activations.division",
+ "tfa.seq2seq.decoder.dynamic_decode": "tfa.seq2seq.dynamic_decode",
+ "tfa.seq2seq.decoder.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.division": "tfa.activations.division",
+ "tfa.seq2seq.loss.SequenceLoss": "tfa.seq2seq.SequenceLoss",
+ "tfa.seq2seq.loss.SequenceLoss.__call__": "tfa.seq2seq.SequenceLoss.__call__",
+ "tfa.seq2seq.loss.SequenceLoss.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.loss.SequenceLoss.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.loss.SequenceLoss.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.loss.SequenceLoss.__init__": "tfa.seq2seq.SequenceLoss.__init__",
+ "tfa.seq2seq.loss.SequenceLoss.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.loss.SequenceLoss.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.loss.SequenceLoss.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.loss.SequenceLoss.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.loss.SequenceLoss.call": "tfa.seq2seq.SequenceLoss.call",
+ "tfa.seq2seq.loss.SequenceLoss.get_config": "tfa.losses.NpairsLoss.get_config",
+ "tfa.seq2seq.loss.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.loss.division": "tfa.activations.division",
+ "tfa.seq2seq.loss.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.loss.sequence_loss": "tfa.seq2seq.sequence_loss",
+ "tfa.seq2seq.print_function": "tfa.activations.print_function",
+ "tfa.seq2seq.sampler.CustomSampler": "tfa.seq2seq.CustomSampler",
+ "tfa.seq2seq.sampler.CustomSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.CustomSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.CustomSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.CustomSampler.__init__": "tfa.seq2seq.CustomSampler.__init__",
+ "tfa.seq2seq.sampler.CustomSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.CustomSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.CustomSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.CustomSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.CustomSampler.batch_size": "tfa.seq2seq.CustomSampler.batch_size",
+ "tfa.seq2seq.sampler.CustomSampler.initialize": "tfa.seq2seq.CustomSampler.initialize",
+ "tfa.seq2seq.sampler.CustomSampler.next_inputs": "tfa.seq2seq.CustomSampler.next_inputs",
+ "tfa.seq2seq.sampler.CustomSampler.sample": "tfa.seq2seq.CustomSampler.sample",
+ "tfa.seq2seq.sampler.CustomSampler.sample_ids_dtype": "tfa.seq2seq.CustomSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.CustomSampler.sample_ids_shape": "tfa.seq2seq.CustomSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler": "tfa.seq2seq.GreedyEmbeddingSampler",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__init__": "tfa.seq2seq.GreedyEmbeddingSampler.__init__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.batch_size": "tfa.seq2seq.GreedyEmbeddingSampler.batch_size",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.initialize": "tfa.seq2seq.GreedyEmbeddingSampler.initialize",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.next_inputs": "tfa.seq2seq.GreedyEmbeddingSampler.next_inputs",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample": "tfa.seq2seq.GreedyEmbeddingSampler.sample",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample_ids_dtype": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample_ids_shape": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.InferenceSampler": "tfa.seq2seq.InferenceSampler",
+ "tfa.seq2seq.sampler.InferenceSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.InferenceSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.InferenceSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.InferenceSampler.__init__": "tfa.seq2seq.InferenceSampler.__init__",
+ "tfa.seq2seq.sampler.InferenceSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.InferenceSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.InferenceSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.InferenceSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.InferenceSampler.batch_size": "tfa.seq2seq.InferenceSampler.batch_size",
+ "tfa.seq2seq.sampler.InferenceSampler.initialize": "tfa.seq2seq.InferenceSampler.initialize",
+ "tfa.seq2seq.sampler.InferenceSampler.next_inputs": "tfa.seq2seq.InferenceSampler.next_inputs",
+ "tfa.seq2seq.sampler.InferenceSampler.sample": "tfa.seq2seq.InferenceSampler.sample",
+ "tfa.seq2seq.sampler.InferenceSampler.sample_ids_dtype": "tfa.seq2seq.InferenceSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.InferenceSampler.sample_ids_shape": "tfa.seq2seq.InferenceSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler": "tfa.seq2seq.SampleEmbeddingSampler",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__init__": "tfa.seq2seq.SampleEmbeddingSampler.__init__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.batch_size": "tfa.seq2seq.GreedyEmbeddingSampler.batch_size",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.initialize": "tfa.seq2seq.GreedyEmbeddingSampler.initialize",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.next_inputs": "tfa.seq2seq.GreedyEmbeddingSampler.next_inputs",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample": "tfa.seq2seq.SampleEmbeddingSampler.sample",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample_ids_dtype": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample_ids_shape": "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.Sampler": "tfa.seq2seq.Sampler",
+ "tfa.seq2seq.sampler.Sampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.Sampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.Sampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.Sampler.__init__": "tfa.seq2seq.AttentionMechanism.__init__",
+ "tfa.seq2seq.sampler.Sampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.Sampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.Sampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.Sampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.Sampler.batch_size": "tfa.seq2seq.Sampler.batch_size",
+ "tfa.seq2seq.sampler.Sampler.initialize": "tfa.seq2seq.Sampler.initialize",
+ "tfa.seq2seq.sampler.Sampler.next_inputs": "tfa.seq2seq.Sampler.next_inputs",
+ "tfa.seq2seq.sampler.Sampler.sample": "tfa.seq2seq.Sampler.sample",
+ "tfa.seq2seq.sampler.Sampler.sample_ids_dtype": "tfa.seq2seq.Sampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.Sampler.sample_ids_shape": "tfa.seq2seq.Sampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler": "tfa.seq2seq.ScheduledEmbeddingTrainingSampler",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__init__": "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__init__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.batch_size": "tfa.seq2seq.TrainingSampler.batch_size",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.initialize": "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.initialize",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.next_inputs": "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.next_inputs",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample": "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample_ids_dtype": "tfa.seq2seq.TrainingSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample_ids_shape": "tfa.seq2seq.TrainingSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler": "tfa.seq2seq.ScheduledOutputTrainingSampler",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__init__": "tfa.seq2seq.ScheduledOutputTrainingSampler.__init__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.batch_size": "tfa.seq2seq.TrainingSampler.batch_size",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.initialize": "tfa.seq2seq.ScheduledOutputTrainingSampler.initialize",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.next_inputs": "tfa.seq2seq.ScheduledOutputTrainingSampler.next_inputs",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample": "tfa.seq2seq.ScheduledOutputTrainingSampler.sample",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample_ids_dtype": "tfa.seq2seq.TrainingSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample_ids_shape": "tfa.seq2seq.TrainingSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.TrainingSampler": "tfa.seq2seq.TrainingSampler",
+ "tfa.seq2seq.sampler.TrainingSampler.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.seq2seq.sampler.TrainingSampler.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.seq2seq.sampler.TrainingSampler.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.seq2seq.sampler.TrainingSampler.__init__": "tfa.seq2seq.TrainingSampler.__init__",
+ "tfa.seq2seq.sampler.TrainingSampler.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.seq2seq.sampler.TrainingSampler.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.seq2seq.sampler.TrainingSampler.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.seq2seq.sampler.TrainingSampler.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.seq2seq.sampler.TrainingSampler.batch_size": "tfa.seq2seq.TrainingSampler.batch_size",
+ "tfa.seq2seq.sampler.TrainingSampler.initialize": "tfa.seq2seq.TrainingSampler.initialize",
+ "tfa.seq2seq.sampler.TrainingSampler.next_inputs": "tfa.seq2seq.TrainingSampler.next_inputs",
+ "tfa.seq2seq.sampler.TrainingSampler.sample": "tfa.seq2seq.TrainingSampler.sample",
+ "tfa.seq2seq.sampler.TrainingSampler.sample_ids_dtype": "tfa.seq2seq.TrainingSampler.sample_ids_dtype",
+ "tfa.seq2seq.sampler.TrainingSampler.sample_ids_shape": "tfa.seq2seq.TrainingSampler.sample_ids_shape",
+ "tfa.seq2seq.sampler.absolute_import": "tfa.activations.absolute_import",
+ "tfa.seq2seq.sampler.division": "tfa.activations.division",
+ "tfa.seq2seq.sampler.print_function": "tfa.activations.print_function",
+ "tfa.text.absolute_import": "tfa.activations.absolute_import",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__call__": "tfa.layers.CorrelationCost.__call__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__eq__": "tfa.callbacks.TQDMProgressBar.__eq__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__ge__": "tfa.callbacks.TQDMProgressBar.__ge__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__gt__": "tfa.callbacks.TQDMProgressBar.__gt__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__le__": "tfa.callbacks.TQDMProgressBar.__le__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__lt__": "tfa.callbacks.TQDMProgressBar.__lt__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__ne__": "tfa.callbacks.TQDMProgressBar.__ne__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__new__": "tfa.callbacks.TQDMProgressBar.__new__",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.activity_regularizer": "tfa.layers.CorrelationCost.activity_regularizer",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_loss": "tfa.layers.CorrelationCost.add_loss",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_metric": "tfa.layers.CorrelationCost.add_metric",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_update": "tfa.layers.CorrelationCost.add_update",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_weight": "tfa.layers.CorrelationCost.add_weight",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_mask": "tfa.layers.CorrelationCost.compute_mask",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_output_shape": "tfa.metrics.CohenKappa.compute_output_shape",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_output_signature": "tfa.layers.CorrelationCost.compute_output_signature",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.count_params": "tfa.layers.CorrelationCost.count_params",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.dtype": "tfa.layers.CorrelationCost.dtype",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.dynamic": "tfa.layers.CorrelationCost.dynamic",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_config": "tfa.seq2seq.AttentionWrapper.get_config",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_initial_state": "tfa.rnn.NASCell.get_initial_state",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_at": "tfa.layers.CorrelationCost.get_input_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_mask_at": "tfa.layers.CorrelationCost.get_input_mask_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_shape_at": "tfa.layers.CorrelationCost.get_input_shape_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_losses_for": "tfa.layers.CorrelationCost.get_losses_for",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_at": "tfa.layers.CorrelationCost.get_output_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_mask_at": "tfa.layers.CorrelationCost.get_output_mask_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_shape_at": "tfa.layers.CorrelationCost.get_output_shape_at",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_updates_for": "tfa.layers.CorrelationCost.get_updates_for",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_weights": "tfa.layers.CorrelationCost.get_weights",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input": "tfa.layers.CorrelationCost.input",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_mask": "tfa.layers.CorrelationCost.input_mask",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_shape": "tfa.layers.CorrelationCost.input_shape",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_spec": "tfa.layers.CorrelationCost.input_spec",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.losses": "tfa.layers.CorrelationCost.losses",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.metrics": "tfa.layers.CorrelationCost.metrics",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.name": "tfa.layers.CorrelationCost.name",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.name_scope": "tfa.layers.CorrelationCost.name_scope",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.non_trainable_variables": "tfa.layers.CorrelationCost.non_trainable_variables",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.non_trainable_weights": "tfa.layers.CorrelationCost.non_trainable_weights",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output": "tfa.layers.CorrelationCost.output",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output_mask": "tfa.layers.CorrelationCost.output_mask",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output_shape": "tfa.layers.CorrelationCost.output_shape",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.set_weights": "tfa.layers.CorrelationCost.set_weights",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.submodules": "tfa.layers.CorrelationCost.submodules",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable": "tfa.layers.CorrelationCost.trainable",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable_variables": "tfa.layers.CorrelationCost.trainable_variables",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable_weights": "tfa.layers.CorrelationCost.trainable_weights",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.updates": "tfa.layers.CorrelationCost.updates",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.variables": "tfa.layers.CorrelationCost.variables",
+ "tfa.text.crf.CrfDecodeForwardRnnCell.weights": "tfa.layers.CorrelationCost.weights",
+ "tfa.text.crf.absolute_import": "tfa.activations.absolute_import",
+ "tfa.text.crf.crf_binary_score": "tfa.text.crf_binary_score",
+ "tfa.text.crf.crf_decode": "tfa.text.crf_decode",
+ "tfa.text.crf.crf_decode_backward": "tfa.text.crf_decode_backward",
+ "tfa.text.crf.crf_decode_forward": "tfa.text.crf_decode_forward",
+ "tfa.text.crf.crf_forward": "tfa.text.crf_forward",
+ "tfa.text.crf.crf_log_likelihood": "tfa.text.crf_log_likelihood",
+ "tfa.text.crf.crf_log_norm": "tfa.text.crf_log_norm",
+ "tfa.text.crf.crf_multitag_sequence_score": "tfa.text.crf_multitag_sequence_score",
+ "tfa.text.crf.crf_sequence_score": "tfa.text.crf_sequence_score",
+ "tfa.text.crf.crf_unary_score": "tfa.text.crf_unary_score",
+ "tfa.text.crf.division": "tfa.activations.division",
+ "tfa.text.crf.print_function": "tfa.activations.print_function",
+ "tfa.text.crf.viterbi_decode": "tfa.text.viterbi_decode",
+ "tfa.text.division": "tfa.activations.division",
+ "tfa.text.parse_time_op.absolute_import": "tfa.activations.absolute_import",
+ "tfa.text.parse_time_op.division": "tfa.activations.division",
+ "tfa.text.parse_time_op.parse_time": "tfa.text.parse_time",
+ "tfa.text.parse_time_op.print_function": "tfa.activations.print_function",
+ "tfa.text.print_function": "tfa.activations.print_function",
+ "tfa.text.skip_gram_ops.absolute_import": "tfa.activations.absolute_import",
+ "tfa.text.skip_gram_ops.division": "tfa.activations.division",
+ "tfa.text.skip_gram_ops.print_function": "tfa.activations.print_function",
+ "tfa.text.skip_gram_ops.skip_gram_sample": "tfa.text.skip_gram_sample",
+ "tfa.text.skip_gram_ops.skip_gram_sample_with_text_vocab": "tfa.text.skip_gram_sample_with_text_vocab"
+ },
+ "is_fragment": {
+ "tfa": false,
+ "tfa.activations": false,
+ "tfa.activations.absolute_import": true,
+ "tfa.activations.division": true,
+ "tfa.activations.gelu": false,
+ "tfa.activations.hardshrink": false,
+ "tfa.activations.lisht": false,
+ "tfa.activations.mish": false,
+ "tfa.activations.print_function": true,
+ "tfa.activations.rrelu": false,
+ "tfa.activations.softshrink": false,
+ "tfa.activations.sparsemax": false,
+ "tfa.activations.tanhshrink": false,
+ "tfa.callbacks": false,
+ "tfa.callbacks.TQDMProgressBar": false,
+ "tfa.callbacks.TQDMProgressBar.__eq__": true,
+ "tfa.callbacks.TQDMProgressBar.__ge__": true,
+ "tfa.callbacks.TQDMProgressBar.__gt__": true,
+ "tfa.callbacks.TQDMProgressBar.__init__": true,
+ "tfa.callbacks.TQDMProgressBar.__le__": true,
+ "tfa.callbacks.TQDMProgressBar.__lt__": true,
+ "tfa.callbacks.TQDMProgressBar.__ne__": true,
+ "tfa.callbacks.TQDMProgressBar.__new__": true,
+ "tfa.callbacks.TQDMProgressBar.format_metrics": true,
+ "tfa.callbacks.TQDMProgressBar.get_config": true,
+ "tfa.callbacks.TQDMProgressBar.on_batch_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_batch_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_epoch_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_epoch_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_predict_batch_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_predict_batch_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_predict_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_predict_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_test_batch_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_test_batch_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_test_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_test_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_train_batch_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_train_batch_end": true,
+ "tfa.callbacks.TQDMProgressBar.on_train_begin": true,
+ "tfa.callbacks.TQDMProgressBar.on_train_end": true,
+ "tfa.callbacks.TQDMProgressBar.set_model": true,
+ "tfa.callbacks.TQDMProgressBar.set_params": true,
+ "tfa.callbacks.TimeStopping": false,
+ "tfa.callbacks.TimeStopping.__eq__": true,
+ "tfa.callbacks.TimeStopping.__ge__": true,
+ "tfa.callbacks.TimeStopping.__gt__": true,
+ "tfa.callbacks.TimeStopping.__init__": true,
+ "tfa.callbacks.TimeStopping.__le__": true,
+ "tfa.callbacks.TimeStopping.__lt__": true,
+ "tfa.callbacks.TimeStopping.__ne__": true,
+ "tfa.callbacks.TimeStopping.__new__": true,
+ "tfa.callbacks.TimeStopping.get_config": true,
+ "tfa.callbacks.TimeStopping.on_batch_begin": true,
+ "tfa.callbacks.TimeStopping.on_batch_end": true,
+ "tfa.callbacks.TimeStopping.on_epoch_begin": true,
+ "tfa.callbacks.TimeStopping.on_epoch_end": true,
+ "tfa.callbacks.TimeStopping.on_predict_batch_begin": true,
+ "tfa.callbacks.TimeStopping.on_predict_batch_end": true,
+ "tfa.callbacks.TimeStopping.on_predict_begin": true,
+ "tfa.callbacks.TimeStopping.on_predict_end": true,
+ "tfa.callbacks.TimeStopping.on_test_batch_begin": true,
+ "tfa.callbacks.TimeStopping.on_test_batch_end": true,
+ "tfa.callbacks.TimeStopping.on_test_begin": true,
+ "tfa.callbacks.TimeStopping.on_test_end": true,
+ "tfa.callbacks.TimeStopping.on_train_batch_begin": true,
+ "tfa.callbacks.TimeStopping.on_train_batch_end": true,
+ "tfa.callbacks.TimeStopping.on_train_begin": true,
+ "tfa.callbacks.TimeStopping.on_train_end": true,
+ "tfa.callbacks.TimeStopping.set_model": true,
+ "tfa.callbacks.TimeStopping.set_params": true,
+ "tfa.callbacks.absolute_import": true,
+ "tfa.callbacks.division": true,
+ "tfa.callbacks.print_function": true,
+ "tfa.callbacks.time_stopping": false,
+ "tfa.callbacks.time_stopping.TimeStopping": false,
+ "tfa.callbacks.time_stopping.TimeStopping.__eq__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__ge__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__gt__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__init__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__le__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__lt__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__ne__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.__new__": true,
+ "tfa.callbacks.time_stopping.TimeStopping.get_config": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_batch_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_batch_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_epoch_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_epoch_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_batch_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_batch_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_predict_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_batch_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_batch_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_test_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_batch_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_batch_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_begin": true,
+ "tfa.callbacks.time_stopping.TimeStopping.on_train_end": true,
+ "tfa.callbacks.time_stopping.TimeStopping.set_model": true,
+ "tfa.callbacks.time_stopping.TimeStopping.set_params": true,
+ "tfa.callbacks.time_stopping.absolute_import": true,
+ "tfa.callbacks.time_stopping.division": true,
+ "tfa.callbacks.time_stopping.print_function": true,
+ "tfa.callbacks.tqdm_progress_bar": false,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar": false,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__eq__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__ge__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__gt__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__init__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__le__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__lt__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__ne__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.__new__": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.format_metrics": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.get_config": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_batch_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_batch_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_epoch_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_epoch_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_batch_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_batch_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_predict_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_batch_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_batch_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_test_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_batch_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_batch_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_begin": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.on_train_end": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.set_model": true,
+ "tfa.callbacks.tqdm_progress_bar.TQDMProgressBar.set_params": true,
+ "tfa.callbacks.tqdm_progress_bar.absolute_import": true,
+ "tfa.callbacks.tqdm_progress_bar.division": true,
+ "tfa.callbacks.tqdm_progress_bar.print_function": true,
+ "tfa.image": false,
+ "tfa.image.absolute_import": true,
+ "tfa.image.adjust_hsv_in_yiq": false,
+ "tfa.image.connected_components": false,
+ "tfa.image.dense_image_warp": false,
+ "tfa.image.distance_transform": false,
+ "tfa.image.distance_transform.absolute_import": true,
+ "tfa.image.distance_transform.division": true,
+ "tfa.image.distance_transform.euclidean_dist_transform": false,
+ "tfa.image.distance_transform.print_function": true,
+ "tfa.image.distort_image_ops": false,
+ "tfa.image.distort_image_ops.absolute_import": true,
+ "tfa.image.distort_image_ops.adjust_hsv_in_yiq": false,
+ "tfa.image.distort_image_ops.division": true,
+ "tfa.image.distort_image_ops.print_function": true,
+ "tfa.image.distort_image_ops.random_hsv_in_yiq": false,
+ "tfa.image.division": true,
+ "tfa.image.euclidean_dist_transform": false,
+ "tfa.image.filters": false,
+ "tfa.image.filters.absolute_import": true,
+ "tfa.image.filters.division": true,
+ "tfa.image.filters.mean_filter2d": false,
+ "tfa.image.filters.median_filter2d": false,
+ "tfa.image.filters.print_function": true,
+ "tfa.image.interpolate_bilinear": false,
+ "tfa.image.interpolate_spline": false,
+ "tfa.image.mean_filter2d": false,
+ "tfa.image.median_filter2d": false,
+ "tfa.image.print_function": true,
+ "tfa.image.random_hsv_in_yiq": false,
+ "tfa.image.resampler": false,
+ "tfa.image.resampler_ops": false,
+ "tfa.image.resampler_ops.absolute_import": true,
+ "tfa.image.resampler_ops.division": true,
+ "tfa.image.resampler_ops.print_function": true,
+ "tfa.image.resampler_ops.resampler": false,
+ "tfa.image.rotate": false,
+ "tfa.image.sparse_image_warp": false,
+ "tfa.image.transform": false,
+ "tfa.image.transform_ops": false,
+ "tfa.image.transform_ops.absolute_import": true,
+ "tfa.image.transform_ops.angles_to_projective_transforms": false,
+ "tfa.image.transform_ops.compose_transforms": false,
+ "tfa.image.transform_ops.division": true,
+ "tfa.image.transform_ops.flat_transforms_to_matrices": false,
+ "tfa.image.transform_ops.matrices_to_flat_transforms": false,
+ "tfa.image.transform_ops.print_function": true,
+ "tfa.image.transform_ops.rotate": false,
+ "tfa.image.transform_ops.transform": false,
+ "tfa.image.translate": false,
+ "tfa.image.translate_ops": false,
+ "tfa.image.translate_ops.absolute_import": true,
+ "tfa.image.translate_ops.division": true,
+ "tfa.image.translate_ops.print_function": true,
+ "tfa.image.translate_ops.transform": false,
+ "tfa.image.translate_ops.translate": false,
+ "tfa.image.translate_ops.translations_to_projective_transforms": false,
+ "tfa.image.utils": false,
+ "tfa.image.utils.absolute_import": true,
+ "tfa.image.utils.division": true,
+ "tfa.image.utils.from_4D_image": false,
+ "tfa.image.utils.get_ndims": false,
+ "tfa.image.utils.print_function": true,
+ "tfa.image.utils.to_4D_image": false,
+ "tfa.layers": false,
+ "tfa.layers.CorrelationCost": false,
+ "tfa.layers.CorrelationCost.__call__": true,
+ "tfa.layers.CorrelationCost.__eq__": true,
+ "tfa.layers.CorrelationCost.__ge__": true,
+ "tfa.layers.CorrelationCost.__gt__": true,
+ "tfa.layers.CorrelationCost.__init__": true,
+ "tfa.layers.CorrelationCost.__le__": true,
+ "tfa.layers.CorrelationCost.__lt__": true,
+ "tfa.layers.CorrelationCost.__ne__": true,
+ "tfa.layers.CorrelationCost.__new__": true,
+ "tfa.layers.CorrelationCost.activity_regularizer": true,
+ "tfa.layers.CorrelationCost.add_loss": true,
+ "tfa.layers.CorrelationCost.add_metric": true,
+ "tfa.layers.CorrelationCost.add_update": true,
+ "tfa.layers.CorrelationCost.add_weight": true,
+ "tfa.layers.CorrelationCost.build": true,
+ "tfa.layers.CorrelationCost.call": true,
+ "tfa.layers.CorrelationCost.compute_mask": true,
+ "tfa.layers.CorrelationCost.compute_output_shape": true,
+ "tfa.layers.CorrelationCost.compute_output_signature": true,
+ "tfa.layers.CorrelationCost.count_params": true,
+ "tfa.layers.CorrelationCost.dtype": true,
+ "tfa.layers.CorrelationCost.dynamic": true,
+ "tfa.layers.CorrelationCost.from_config": true,
+ "tfa.layers.CorrelationCost.get_config": true,
+ "tfa.layers.CorrelationCost.get_input_at": true,
+ "tfa.layers.CorrelationCost.get_input_mask_at": true,
+ "tfa.layers.CorrelationCost.get_input_shape_at": true,
+ "tfa.layers.CorrelationCost.get_losses_for": true,
+ "tfa.layers.CorrelationCost.get_output_at": true,
+ "tfa.layers.CorrelationCost.get_output_mask_at": true,
+ "tfa.layers.CorrelationCost.get_output_shape_at": true,
+ "tfa.layers.CorrelationCost.get_updates_for": true,
+ "tfa.layers.CorrelationCost.get_weights": true,
+ "tfa.layers.CorrelationCost.input": true,
+ "tfa.layers.CorrelationCost.input_mask": true,
+ "tfa.layers.CorrelationCost.input_shape": true,
+ "tfa.layers.CorrelationCost.input_spec": true,
+ "tfa.layers.CorrelationCost.losses": true,
+ "tfa.layers.CorrelationCost.metrics": true,
+ "tfa.layers.CorrelationCost.name": true,
+ "tfa.layers.CorrelationCost.name_scope": true,
+ "tfa.layers.CorrelationCost.non_trainable_variables": true,
+ "tfa.layers.CorrelationCost.non_trainable_weights": true,
+ "tfa.layers.CorrelationCost.output": true,
+ "tfa.layers.CorrelationCost.output_mask": true,
+ "tfa.layers.CorrelationCost.output_shape": true,
+ "tfa.layers.CorrelationCost.set_weights": true,
+ "tfa.layers.CorrelationCost.submodules": true,
+ "tfa.layers.CorrelationCost.trainable": true,
+ "tfa.layers.CorrelationCost.trainable_variables": true,
+ "tfa.layers.CorrelationCost.trainable_weights": true,
+ "tfa.layers.CorrelationCost.updates": true,
+ "tfa.layers.CorrelationCost.variables": true,
+ "tfa.layers.CorrelationCost.weights": true,
+ "tfa.layers.CorrelationCost.with_name_scope": true,
+ "tfa.layers.GELU": false,
+ "tfa.layers.GELU.__call__": true,
+ "tfa.layers.GELU.__eq__": true,
+ "tfa.layers.GELU.__ge__": true,
+ "tfa.layers.GELU.__gt__": true,
+ "tfa.layers.GELU.__init__": true,
+ "tfa.layers.GELU.__le__": true,
+ "tfa.layers.GELU.__lt__": true,
+ "tfa.layers.GELU.__ne__": true,
+ "tfa.layers.GELU.__new__": true,
+ "tfa.layers.GELU.activity_regularizer": true,
+ "tfa.layers.GELU.add_loss": true,
+ "tfa.layers.GELU.add_metric": true,
+ "tfa.layers.GELU.add_update": true,
+ "tfa.layers.GELU.add_weight": true,
+ "tfa.layers.GELU.build": true,
+ "tfa.layers.GELU.call": true,
+ "tfa.layers.GELU.compute_mask": true,
+ "tfa.layers.GELU.compute_output_shape": true,
+ "tfa.layers.GELU.compute_output_signature": true,
+ "tfa.layers.GELU.count_params": true,
+ "tfa.layers.GELU.dtype": true,
+ "tfa.layers.GELU.dynamic": true,
+ "tfa.layers.GELU.from_config": true,
+ "tfa.layers.GELU.get_config": true,
+ "tfa.layers.GELU.get_input_at": true,
+ "tfa.layers.GELU.get_input_mask_at": true,
+ "tfa.layers.GELU.get_input_shape_at": true,
+ "tfa.layers.GELU.get_losses_for": true,
+ "tfa.layers.GELU.get_output_at": true,
+ "tfa.layers.GELU.get_output_mask_at": true,
+ "tfa.layers.GELU.get_output_shape_at": true,
+ "tfa.layers.GELU.get_updates_for": true,
+ "tfa.layers.GELU.get_weights": true,
+ "tfa.layers.GELU.input": true,
+ "tfa.layers.GELU.input_mask": true,
+ "tfa.layers.GELU.input_shape": true,
+ "tfa.layers.GELU.input_spec": true,
+ "tfa.layers.GELU.losses": true,
+ "tfa.layers.GELU.metrics": true,
+ "tfa.layers.GELU.name": true,
+ "tfa.layers.GELU.name_scope": true,
+ "tfa.layers.GELU.non_trainable_variables": true,
+ "tfa.layers.GELU.non_trainable_weights": true,
+ "tfa.layers.GELU.output": true,
+ "tfa.layers.GELU.output_mask": true,
+ "tfa.layers.GELU.output_shape": true,
+ "tfa.layers.GELU.set_weights": true,
+ "tfa.layers.GELU.submodules": true,
+ "tfa.layers.GELU.trainable": true,
+ "tfa.layers.GELU.trainable_variables": true,
+ "tfa.layers.GELU.trainable_weights": true,
+ "tfa.layers.GELU.updates": true,
+ "tfa.layers.GELU.variables": true,
+ "tfa.layers.GELU.weights": true,
+ "tfa.layers.GELU.with_name_scope": true,
+ "tfa.layers.GroupNormalization": false,
+ "tfa.layers.GroupNormalization.__call__": true,
+ "tfa.layers.GroupNormalization.__eq__": true,
+ "tfa.layers.GroupNormalization.__ge__": true,
+ "tfa.layers.GroupNormalization.__gt__": true,
+ "tfa.layers.GroupNormalization.__init__": true,
+ "tfa.layers.GroupNormalization.__le__": true,
+ "tfa.layers.GroupNormalization.__lt__": true,
+ "tfa.layers.GroupNormalization.__ne__": true,
+ "tfa.layers.GroupNormalization.__new__": true,
+ "tfa.layers.GroupNormalization.activity_regularizer": true,
+ "tfa.layers.GroupNormalization.add_loss": true,
+ "tfa.layers.GroupNormalization.add_metric": true,
+ "tfa.layers.GroupNormalization.add_update": true,
+ "tfa.layers.GroupNormalization.add_weight": true,
+ "tfa.layers.GroupNormalization.build": true,
+ "tfa.layers.GroupNormalization.call": true,
+ "tfa.layers.GroupNormalization.compute_mask": true,
+ "tfa.layers.GroupNormalization.compute_output_shape": true,
+ "tfa.layers.GroupNormalization.compute_output_signature": true,
+ "tfa.layers.GroupNormalization.count_params": true,
+ "tfa.layers.GroupNormalization.dtype": true,
+ "tfa.layers.GroupNormalization.dynamic": true,
+ "tfa.layers.GroupNormalization.from_config": true,
+ "tfa.layers.GroupNormalization.get_config": true,
+ "tfa.layers.GroupNormalization.get_input_at": true,
+ "tfa.layers.GroupNormalization.get_input_mask_at": true,
+ "tfa.layers.GroupNormalization.get_input_shape_at": true,
+ "tfa.layers.GroupNormalization.get_losses_for": true,
+ "tfa.layers.GroupNormalization.get_output_at": true,
+ "tfa.layers.GroupNormalization.get_output_mask_at": true,
+ "tfa.layers.GroupNormalization.get_output_shape_at": true,
+ "tfa.layers.GroupNormalization.get_updates_for": true,
+ "tfa.layers.GroupNormalization.get_weights": true,
+ "tfa.layers.GroupNormalization.input": true,
+ "tfa.layers.GroupNormalization.input_mask": true,
+ "tfa.layers.GroupNormalization.input_shape": true,
+ "tfa.layers.GroupNormalization.input_spec": true,
+ "tfa.layers.GroupNormalization.losses": true,
+ "tfa.layers.GroupNormalization.metrics": true,
+ "tfa.layers.GroupNormalization.name": true,
+ "tfa.layers.GroupNormalization.name_scope": true,
+ "tfa.layers.GroupNormalization.non_trainable_variables": true,
+ "tfa.layers.GroupNormalization.non_trainable_weights": true,
+ "tfa.layers.GroupNormalization.output": true,
+ "tfa.layers.GroupNormalization.output_mask": true,
+ "tfa.layers.GroupNormalization.output_shape": true,
+ "tfa.layers.GroupNormalization.set_weights": true,
+ "tfa.layers.GroupNormalization.submodules": true,
+ "tfa.layers.GroupNormalization.trainable": true,
+ "tfa.layers.GroupNormalization.trainable_variables": true,
+ "tfa.layers.GroupNormalization.trainable_weights": true,
+ "tfa.layers.GroupNormalization.updates": true,
+ "tfa.layers.GroupNormalization.variables": true,
+ "tfa.layers.GroupNormalization.weights": true,
+ "tfa.layers.GroupNormalization.with_name_scope": true,
+ "tfa.layers.InstanceNormalization": false,
+ "tfa.layers.InstanceNormalization.__call__": true,
+ "tfa.layers.InstanceNormalization.__eq__": true,
+ "tfa.layers.InstanceNormalization.__ge__": true,
+ "tfa.layers.InstanceNormalization.__gt__": true,
+ "tfa.layers.InstanceNormalization.__init__": true,
+ "tfa.layers.InstanceNormalization.__le__": true,
+ "tfa.layers.InstanceNormalization.__lt__": true,
+ "tfa.layers.InstanceNormalization.__ne__": true,
+ "tfa.layers.InstanceNormalization.__new__": true,
+ "tfa.layers.InstanceNormalization.activity_regularizer": true,
+ "tfa.layers.InstanceNormalization.add_loss": true,
+ "tfa.layers.InstanceNormalization.add_metric": true,
+ "tfa.layers.InstanceNormalization.add_update": true,
+ "tfa.layers.InstanceNormalization.add_weight": true,
+ "tfa.layers.InstanceNormalization.build": true,
+ "tfa.layers.InstanceNormalization.call": true,
+ "tfa.layers.InstanceNormalization.compute_mask": true,
+ "tfa.layers.InstanceNormalization.compute_output_shape": true,
+ "tfa.layers.InstanceNormalization.compute_output_signature": true,
+ "tfa.layers.InstanceNormalization.count_params": true,
+ "tfa.layers.InstanceNormalization.dtype": true,
+ "tfa.layers.InstanceNormalization.dynamic": true,
+ "tfa.layers.InstanceNormalization.from_config": true,
+ "tfa.layers.InstanceNormalization.get_config": true,
+ "tfa.layers.InstanceNormalization.get_input_at": true,
+ "tfa.layers.InstanceNormalization.get_input_mask_at": true,
+ "tfa.layers.InstanceNormalization.get_input_shape_at": true,
+ "tfa.layers.InstanceNormalization.get_losses_for": true,
+ "tfa.layers.InstanceNormalization.get_output_at": true,
+ "tfa.layers.InstanceNormalization.get_output_mask_at": true,
+ "tfa.layers.InstanceNormalization.get_output_shape_at": true,
+ "tfa.layers.InstanceNormalization.get_updates_for": true,
+ "tfa.layers.InstanceNormalization.get_weights": true,
+ "tfa.layers.InstanceNormalization.input": true,
+ "tfa.layers.InstanceNormalization.input_mask": true,
+ "tfa.layers.InstanceNormalization.input_shape": true,
+ "tfa.layers.InstanceNormalization.input_spec": true,
+ "tfa.layers.InstanceNormalization.losses": true,
+ "tfa.layers.InstanceNormalization.metrics": true,
+ "tfa.layers.InstanceNormalization.name": true,
+ "tfa.layers.InstanceNormalization.name_scope": true,
+ "tfa.layers.InstanceNormalization.non_trainable_variables": true,
+ "tfa.layers.InstanceNormalization.non_trainable_weights": true,
+ "tfa.layers.InstanceNormalization.output": true,
+ "tfa.layers.InstanceNormalization.output_mask": true,
+ "tfa.layers.InstanceNormalization.output_shape": true,
+ "tfa.layers.InstanceNormalization.set_weights": true,
+ "tfa.layers.InstanceNormalization.submodules": true,
+ "tfa.layers.InstanceNormalization.trainable": true,
+ "tfa.layers.InstanceNormalization.trainable_variables": true,
+ "tfa.layers.InstanceNormalization.trainable_weights": true,
+ "tfa.layers.InstanceNormalization.updates": true,
+ "tfa.layers.InstanceNormalization.variables": true,
+ "tfa.layers.InstanceNormalization.weights": true,
+ "tfa.layers.InstanceNormalization.with_name_scope": true,
+ "tfa.layers.Maxout": false,
+ "tfa.layers.Maxout.__call__": true,
+ "tfa.layers.Maxout.__eq__": true,
+ "tfa.layers.Maxout.__ge__": true,
+ "tfa.layers.Maxout.__gt__": true,
+ "tfa.layers.Maxout.__init__": true,
+ "tfa.layers.Maxout.__le__": true,
+ "tfa.layers.Maxout.__lt__": true,
+ "tfa.layers.Maxout.__ne__": true,
+ "tfa.layers.Maxout.__new__": true,
+ "tfa.layers.Maxout.activity_regularizer": true,
+ "tfa.layers.Maxout.add_loss": true,
+ "tfa.layers.Maxout.add_metric": true,
+ "tfa.layers.Maxout.add_update": true,
+ "tfa.layers.Maxout.add_weight": true,
+ "tfa.layers.Maxout.build": true,
+ "tfa.layers.Maxout.call": true,
+ "tfa.layers.Maxout.compute_mask": true,
+ "tfa.layers.Maxout.compute_output_shape": true,
+ "tfa.layers.Maxout.compute_output_signature": true,
+ "tfa.layers.Maxout.count_params": true,
+ "tfa.layers.Maxout.dtype": true,
+ "tfa.layers.Maxout.dynamic": true,
+ "tfa.layers.Maxout.from_config": true,
+ "tfa.layers.Maxout.get_config": true,
+ "tfa.layers.Maxout.get_input_at": true,
+ "tfa.layers.Maxout.get_input_mask_at": true,
+ "tfa.layers.Maxout.get_input_shape_at": true,
+ "tfa.layers.Maxout.get_losses_for": true,
+ "tfa.layers.Maxout.get_output_at": true,
+ "tfa.layers.Maxout.get_output_mask_at": true,
+ "tfa.layers.Maxout.get_output_shape_at": true,
+ "tfa.layers.Maxout.get_updates_for": true,
+ "tfa.layers.Maxout.get_weights": true,
+ "tfa.layers.Maxout.input": true,
+ "tfa.layers.Maxout.input_mask": true,
+ "tfa.layers.Maxout.input_shape": true,
+ "tfa.layers.Maxout.input_spec": true,
+ "tfa.layers.Maxout.losses": true,
+ "tfa.layers.Maxout.metrics": true,
+ "tfa.layers.Maxout.name": true,
+ "tfa.layers.Maxout.name_scope": true,
+ "tfa.layers.Maxout.non_trainable_variables": true,
+ "tfa.layers.Maxout.non_trainable_weights": true,
+ "tfa.layers.Maxout.output": true,
+ "tfa.layers.Maxout.output_mask": true,
+ "tfa.layers.Maxout.output_shape": true,
+ "tfa.layers.Maxout.set_weights": true,
+ "tfa.layers.Maxout.submodules": true,
+ "tfa.layers.Maxout.trainable": true,
+ "tfa.layers.Maxout.trainable_variables": true,
+ "tfa.layers.Maxout.trainable_weights": true,
+ "tfa.layers.Maxout.updates": true,
+ "tfa.layers.Maxout.variables": true,
+ "tfa.layers.Maxout.weights": true,
+ "tfa.layers.Maxout.with_name_scope": true,
+ "tfa.layers.PoincareNormalize": false,
+ "tfa.layers.PoincareNormalize.__call__": true,
+ "tfa.layers.PoincareNormalize.__eq__": true,
+ "tfa.layers.PoincareNormalize.__ge__": true,
+ "tfa.layers.PoincareNormalize.__gt__": true,
+ "tfa.layers.PoincareNormalize.__init__": true,
+ "tfa.layers.PoincareNormalize.__le__": true,
+ "tfa.layers.PoincareNormalize.__lt__": true,
+ "tfa.layers.PoincareNormalize.__ne__": true,
+ "tfa.layers.PoincareNormalize.__new__": true,
+ "tfa.layers.PoincareNormalize.activity_regularizer": true,
+ "tfa.layers.PoincareNormalize.add_loss": true,
+ "tfa.layers.PoincareNormalize.add_metric": true,
+ "tfa.layers.PoincareNormalize.add_update": true,
+ "tfa.layers.PoincareNormalize.add_weight": true,
+ "tfa.layers.PoincareNormalize.build": true,
+ "tfa.layers.PoincareNormalize.call": true,
+ "tfa.layers.PoincareNormalize.compute_mask": true,
+ "tfa.layers.PoincareNormalize.compute_output_shape": true,
+ "tfa.layers.PoincareNormalize.compute_output_signature": true,
+ "tfa.layers.PoincareNormalize.count_params": true,
+ "tfa.layers.PoincareNormalize.dtype": true,
+ "tfa.layers.PoincareNormalize.dynamic": true,
+ "tfa.layers.PoincareNormalize.from_config": true,
+ "tfa.layers.PoincareNormalize.get_config": true,
+ "tfa.layers.PoincareNormalize.get_input_at": true,
+ "tfa.layers.PoincareNormalize.get_input_mask_at": true,
+ "tfa.layers.PoincareNormalize.get_input_shape_at": true,
+ "tfa.layers.PoincareNormalize.get_losses_for": true,
+ "tfa.layers.PoincareNormalize.get_output_at": true,
+ "tfa.layers.PoincareNormalize.get_output_mask_at": true,
+ "tfa.layers.PoincareNormalize.get_output_shape_at": true,
+ "tfa.layers.PoincareNormalize.get_updates_for": true,
+ "tfa.layers.PoincareNormalize.get_weights": true,
+ "tfa.layers.PoincareNormalize.input": true,
+ "tfa.layers.PoincareNormalize.input_mask": true,
+ "tfa.layers.PoincareNormalize.input_shape": true,
+ "tfa.layers.PoincareNormalize.input_spec": true,
+ "tfa.layers.PoincareNormalize.losses": true,
+ "tfa.layers.PoincareNormalize.metrics": true,
+ "tfa.layers.PoincareNormalize.name": true,
+ "tfa.layers.PoincareNormalize.name_scope": true,
+ "tfa.layers.PoincareNormalize.non_trainable_variables": true,
+ "tfa.layers.PoincareNormalize.non_trainable_weights": true,
+ "tfa.layers.PoincareNormalize.output": true,
+ "tfa.layers.PoincareNormalize.output_mask": true,
+ "tfa.layers.PoincareNormalize.output_shape": true,
+ "tfa.layers.PoincareNormalize.set_weights": true,
+ "tfa.layers.PoincareNormalize.submodules": true,
+ "tfa.layers.PoincareNormalize.trainable": true,
+ "tfa.layers.PoincareNormalize.trainable_variables": true,
+ "tfa.layers.PoincareNormalize.trainable_weights": true,
+ "tfa.layers.PoincareNormalize.updates": true,
+ "tfa.layers.PoincareNormalize.variables": true,
+ "tfa.layers.PoincareNormalize.weights": true,
+ "tfa.layers.PoincareNormalize.with_name_scope": true,
+ "tfa.layers.Sparsemax": false,
+ "tfa.layers.Sparsemax.__call__": true,
+ "tfa.layers.Sparsemax.__eq__": true,
+ "tfa.layers.Sparsemax.__ge__": true,
+ "tfa.layers.Sparsemax.__gt__": true,
+ "tfa.layers.Sparsemax.__init__": true,
+ "tfa.layers.Sparsemax.__le__": true,
+ "tfa.layers.Sparsemax.__lt__": true,
+ "tfa.layers.Sparsemax.__ne__": true,
+ "tfa.layers.Sparsemax.__new__": true,
+ "tfa.layers.Sparsemax.activity_regularizer": true,
+ "tfa.layers.Sparsemax.add_loss": true,
+ "tfa.layers.Sparsemax.add_metric": true,
+ "tfa.layers.Sparsemax.add_update": true,
+ "tfa.layers.Sparsemax.add_weight": true,
+ "tfa.layers.Sparsemax.build": true,
+ "tfa.layers.Sparsemax.call": true,
+ "tfa.layers.Sparsemax.compute_mask": true,
+ "tfa.layers.Sparsemax.compute_output_shape": true,
+ "tfa.layers.Sparsemax.compute_output_signature": true,
+ "tfa.layers.Sparsemax.count_params": true,
+ "tfa.layers.Sparsemax.dtype": true,
+ "tfa.layers.Sparsemax.dynamic": true,
+ "tfa.layers.Sparsemax.from_config": true,
+ "tfa.layers.Sparsemax.get_config": true,
+ "tfa.layers.Sparsemax.get_input_at": true,
+ "tfa.layers.Sparsemax.get_input_mask_at": true,
+ "tfa.layers.Sparsemax.get_input_shape_at": true,
+ "tfa.layers.Sparsemax.get_losses_for": true,
+ "tfa.layers.Sparsemax.get_output_at": true,
+ "tfa.layers.Sparsemax.get_output_mask_at": true,
+ "tfa.layers.Sparsemax.get_output_shape_at": true,
+ "tfa.layers.Sparsemax.get_updates_for": true,
+ "tfa.layers.Sparsemax.get_weights": true,
+ "tfa.layers.Sparsemax.input": true,
+ "tfa.layers.Sparsemax.input_mask": true,
+ "tfa.layers.Sparsemax.input_shape": true,
+ "tfa.layers.Sparsemax.input_spec": true,
+ "tfa.layers.Sparsemax.losses": true,
+ "tfa.layers.Sparsemax.metrics": true,
+ "tfa.layers.Sparsemax.name": true,
+ "tfa.layers.Sparsemax.name_scope": true,
+ "tfa.layers.Sparsemax.non_trainable_variables": true,
+ "tfa.layers.Sparsemax.non_trainable_weights": true,
+ "tfa.layers.Sparsemax.output": true,
+ "tfa.layers.Sparsemax.output_mask": true,
+ "tfa.layers.Sparsemax.output_shape": true,
+ "tfa.layers.Sparsemax.set_weights": true,
+ "tfa.layers.Sparsemax.submodules": true,
+ "tfa.layers.Sparsemax.trainable": true,
+ "tfa.layers.Sparsemax.trainable_variables": true,
+ "tfa.layers.Sparsemax.trainable_weights": true,
+ "tfa.layers.Sparsemax.updates": true,
+ "tfa.layers.Sparsemax.variables": true,
+ "tfa.layers.Sparsemax.weights": true,
+ "tfa.layers.Sparsemax.with_name_scope": true,
+ "tfa.layers.WeightNormalization": false,
+ "tfa.layers.WeightNormalization.__call__": true,
+ "tfa.layers.WeightNormalization.__eq__": true,
+ "tfa.layers.WeightNormalization.__ge__": true,
+ "tfa.layers.WeightNormalization.__gt__": true,
+ "tfa.layers.WeightNormalization.__init__": true,
+ "tfa.layers.WeightNormalization.__le__": true,
+ "tfa.layers.WeightNormalization.__lt__": true,
+ "tfa.layers.WeightNormalization.__ne__": true,
+ "tfa.layers.WeightNormalization.__new__": true,
+ "tfa.layers.WeightNormalization.activity_regularizer": true,
+ "tfa.layers.WeightNormalization.add_loss": true,
+ "tfa.layers.WeightNormalization.add_metric": true,
+ "tfa.layers.WeightNormalization.add_update": true,
+ "tfa.layers.WeightNormalization.add_weight": true,
+ "tfa.layers.WeightNormalization.build": true,
+ "tfa.layers.WeightNormalization.call": true,
+ "tfa.layers.WeightNormalization.compute_mask": true,
+ "tfa.layers.WeightNormalization.compute_output_shape": true,
+ "tfa.layers.WeightNormalization.compute_output_signature": true,
+ "tfa.layers.WeightNormalization.count_params": true,
+ "tfa.layers.WeightNormalization.dtype": true,
+ "tfa.layers.WeightNormalization.dynamic": true,
+ "tfa.layers.WeightNormalization.from_config": true,
+ "tfa.layers.WeightNormalization.get_config": true,
+ "tfa.layers.WeightNormalization.get_input_at": true,
+ "tfa.layers.WeightNormalization.get_input_mask_at": true,
+ "tfa.layers.WeightNormalization.get_input_shape_at": true,
+ "tfa.layers.WeightNormalization.get_losses_for": true,
+ "tfa.layers.WeightNormalization.get_output_at": true,
+ "tfa.layers.WeightNormalization.get_output_mask_at": true,
+ "tfa.layers.WeightNormalization.get_output_shape_at": true,
+ "tfa.layers.WeightNormalization.get_updates_for": true,
+ "tfa.layers.WeightNormalization.get_weights": true,
+ "tfa.layers.WeightNormalization.input": true,
+ "tfa.layers.WeightNormalization.input_mask": true,
+ "tfa.layers.WeightNormalization.input_shape": true,
+ "tfa.layers.WeightNormalization.input_spec": true,
+ "tfa.layers.WeightNormalization.losses": true,
+ "tfa.layers.WeightNormalization.metrics": true,
+ "tfa.layers.WeightNormalization.name": true,
+ "tfa.layers.WeightNormalization.name_scope": true,
+ "tfa.layers.WeightNormalization.non_trainable_variables": true,
+ "tfa.layers.WeightNormalization.non_trainable_weights": true,
+ "tfa.layers.WeightNormalization.output": true,
+ "tfa.layers.WeightNormalization.output_mask": true,
+ "tfa.layers.WeightNormalization.output_shape": true,
+ "tfa.layers.WeightNormalization.remove": true,
+ "tfa.layers.WeightNormalization.set_weights": true,
+ "tfa.layers.WeightNormalization.submodules": true,
+ "tfa.layers.WeightNormalization.trainable": true,
+ "tfa.layers.WeightNormalization.trainable_variables": true,
+ "tfa.layers.WeightNormalization.trainable_weights": true,
+ "tfa.layers.WeightNormalization.updates": true,
+ "tfa.layers.WeightNormalization.variables": true,
+ "tfa.layers.WeightNormalization.weights": true,
+ "tfa.layers.WeightNormalization.with_name_scope": true,
+ "tfa.layers.absolute_import": true,
+ "tfa.layers.division": true,
+ "tfa.layers.gelu": false,
+ "tfa.layers.gelu.GELU": false,
+ "tfa.layers.gelu.GELU.__call__": true,
+ "tfa.layers.gelu.GELU.__eq__": true,
+ "tfa.layers.gelu.GELU.__ge__": true,
+ "tfa.layers.gelu.GELU.__gt__": true,
+ "tfa.layers.gelu.GELU.__init__": true,
+ "tfa.layers.gelu.GELU.__le__": true,
+ "tfa.layers.gelu.GELU.__lt__": true,
+ "tfa.layers.gelu.GELU.__ne__": true,
+ "tfa.layers.gelu.GELU.__new__": true,
+ "tfa.layers.gelu.GELU.activity_regularizer": true,
+ "tfa.layers.gelu.GELU.add_loss": true,
+ "tfa.layers.gelu.GELU.add_metric": true,
+ "tfa.layers.gelu.GELU.add_update": true,
+ "tfa.layers.gelu.GELU.add_weight": true,
+ "tfa.layers.gelu.GELU.build": true,
+ "tfa.layers.gelu.GELU.call": true,
+ "tfa.layers.gelu.GELU.compute_mask": true,
+ "tfa.layers.gelu.GELU.compute_output_shape": true,
+ "tfa.layers.gelu.GELU.compute_output_signature": true,
+ "tfa.layers.gelu.GELU.count_params": true,
+ "tfa.layers.gelu.GELU.dtype": true,
+ "tfa.layers.gelu.GELU.dynamic": true,
+ "tfa.layers.gelu.GELU.from_config": true,
+ "tfa.layers.gelu.GELU.get_config": true,
+ "tfa.layers.gelu.GELU.get_input_at": true,
+ "tfa.layers.gelu.GELU.get_input_mask_at": true,
+ "tfa.layers.gelu.GELU.get_input_shape_at": true,
+ "tfa.layers.gelu.GELU.get_losses_for": true,
+ "tfa.layers.gelu.GELU.get_output_at": true,
+ "tfa.layers.gelu.GELU.get_output_mask_at": true,
+ "tfa.layers.gelu.GELU.get_output_shape_at": true,
+ "tfa.layers.gelu.GELU.get_updates_for": true,
+ "tfa.layers.gelu.GELU.get_weights": true,
+ "tfa.layers.gelu.GELU.input": true,
+ "tfa.layers.gelu.GELU.input_mask": true,
+ "tfa.layers.gelu.GELU.input_shape": true,
+ "tfa.layers.gelu.GELU.input_spec": true,
+ "tfa.layers.gelu.GELU.losses": true,
+ "tfa.layers.gelu.GELU.metrics": true,
+ "tfa.layers.gelu.GELU.name": true,
+ "tfa.layers.gelu.GELU.name_scope": true,
+ "tfa.layers.gelu.GELU.non_trainable_variables": true,
+ "tfa.layers.gelu.GELU.non_trainable_weights": true,
+ "tfa.layers.gelu.GELU.output": true,
+ "tfa.layers.gelu.GELU.output_mask": true,
+ "tfa.layers.gelu.GELU.output_shape": true,
+ "tfa.layers.gelu.GELU.set_weights": true,
+ "tfa.layers.gelu.GELU.submodules": true,
+ "tfa.layers.gelu.GELU.trainable": true,
+ "tfa.layers.gelu.GELU.trainable_variables": true,
+ "tfa.layers.gelu.GELU.trainable_weights": true,
+ "tfa.layers.gelu.GELU.updates": true,
+ "tfa.layers.gelu.GELU.variables": true,
+ "tfa.layers.gelu.GELU.weights": true,
+ "tfa.layers.gelu.GELU.with_name_scope": true,
+ "tfa.layers.gelu.absolute_import": true,
+ "tfa.layers.gelu.division": true,
+ "tfa.layers.gelu.print_function": true,
+ "tfa.layers.maxout": false,
+ "tfa.layers.maxout.Maxout": false,
+ "tfa.layers.maxout.Maxout.__call__": true,
+ "tfa.layers.maxout.Maxout.__eq__": true,
+ "tfa.layers.maxout.Maxout.__ge__": true,
+ "tfa.layers.maxout.Maxout.__gt__": true,
+ "tfa.layers.maxout.Maxout.__init__": true,
+ "tfa.layers.maxout.Maxout.__le__": true,
+ "tfa.layers.maxout.Maxout.__lt__": true,
+ "tfa.layers.maxout.Maxout.__ne__": true,
+ "tfa.layers.maxout.Maxout.__new__": true,
+ "tfa.layers.maxout.Maxout.activity_regularizer": true,
+ "tfa.layers.maxout.Maxout.add_loss": true,
+ "tfa.layers.maxout.Maxout.add_metric": true,
+ "tfa.layers.maxout.Maxout.add_update": true,
+ "tfa.layers.maxout.Maxout.add_weight": true,
+ "tfa.layers.maxout.Maxout.build": true,
+ "tfa.layers.maxout.Maxout.call": true,
+ "tfa.layers.maxout.Maxout.compute_mask": true,
+ "tfa.layers.maxout.Maxout.compute_output_shape": true,
+ "tfa.layers.maxout.Maxout.compute_output_signature": true,
+ "tfa.layers.maxout.Maxout.count_params": true,
+ "tfa.layers.maxout.Maxout.dtype": true,
+ "tfa.layers.maxout.Maxout.dynamic": true,
+ "tfa.layers.maxout.Maxout.from_config": true,
+ "tfa.layers.maxout.Maxout.get_config": true,
+ "tfa.layers.maxout.Maxout.get_input_at": true,
+ "tfa.layers.maxout.Maxout.get_input_mask_at": true,
+ "tfa.layers.maxout.Maxout.get_input_shape_at": true,
+ "tfa.layers.maxout.Maxout.get_losses_for": true,
+ "tfa.layers.maxout.Maxout.get_output_at": true,
+ "tfa.layers.maxout.Maxout.get_output_mask_at": true,
+ "tfa.layers.maxout.Maxout.get_output_shape_at": true,
+ "tfa.layers.maxout.Maxout.get_updates_for": true,
+ "tfa.layers.maxout.Maxout.get_weights": true,
+ "tfa.layers.maxout.Maxout.input": true,
+ "tfa.layers.maxout.Maxout.input_mask": true,
+ "tfa.layers.maxout.Maxout.input_shape": true,
+ "tfa.layers.maxout.Maxout.input_spec": true,
+ "tfa.layers.maxout.Maxout.losses": true,
+ "tfa.layers.maxout.Maxout.metrics": true,
+ "tfa.layers.maxout.Maxout.name": true,
+ "tfa.layers.maxout.Maxout.name_scope": true,
+ "tfa.layers.maxout.Maxout.non_trainable_variables": true,
+ "tfa.layers.maxout.Maxout.non_trainable_weights": true,
+ "tfa.layers.maxout.Maxout.output": true,
+ "tfa.layers.maxout.Maxout.output_mask": true,
+ "tfa.layers.maxout.Maxout.output_shape": true,
+ "tfa.layers.maxout.Maxout.set_weights": true,
+ "tfa.layers.maxout.Maxout.submodules": true,
+ "tfa.layers.maxout.Maxout.trainable": true,
+ "tfa.layers.maxout.Maxout.trainable_variables": true,
+ "tfa.layers.maxout.Maxout.trainable_weights": true,
+ "tfa.layers.maxout.Maxout.updates": true,
+ "tfa.layers.maxout.Maxout.variables": true,
+ "tfa.layers.maxout.Maxout.weights": true,
+ "tfa.layers.maxout.Maxout.with_name_scope": true,
+ "tfa.layers.maxout.absolute_import": true,
+ "tfa.layers.maxout.division": true,
+ "tfa.layers.maxout.print_function": true,
+ "tfa.layers.normalizations": false,
+ "tfa.layers.normalizations.GroupNormalization": false,
+ "tfa.layers.normalizations.GroupNormalization.__call__": true,
+ "tfa.layers.normalizations.GroupNormalization.__eq__": true,
+ "tfa.layers.normalizations.GroupNormalization.__ge__": true,
+ "tfa.layers.normalizations.GroupNormalization.__gt__": true,
+ "tfa.layers.normalizations.GroupNormalization.__init__": true,
+ "tfa.layers.normalizations.GroupNormalization.__le__": true,
+ "tfa.layers.normalizations.GroupNormalization.__lt__": true,
+ "tfa.layers.normalizations.GroupNormalization.__ne__": true,
+ "tfa.layers.normalizations.GroupNormalization.__new__": true,
+ "tfa.layers.normalizations.GroupNormalization.activity_regularizer": true,
+ "tfa.layers.normalizations.GroupNormalization.add_loss": true,
+ "tfa.layers.normalizations.GroupNormalization.add_metric": true,
+ "tfa.layers.normalizations.GroupNormalization.add_update": true,
+ "tfa.layers.normalizations.GroupNormalization.add_weight": true,
+ "tfa.layers.normalizations.GroupNormalization.build": true,
+ "tfa.layers.normalizations.GroupNormalization.call": true,
+ "tfa.layers.normalizations.GroupNormalization.compute_mask": true,
+ "tfa.layers.normalizations.GroupNormalization.compute_output_shape": true,
+ "tfa.layers.normalizations.GroupNormalization.compute_output_signature": true,
+ "tfa.layers.normalizations.GroupNormalization.count_params": true,
+ "tfa.layers.normalizations.GroupNormalization.dtype": true,
+ "tfa.layers.normalizations.GroupNormalization.dynamic": true,
+ "tfa.layers.normalizations.GroupNormalization.from_config": true,
+ "tfa.layers.normalizations.GroupNormalization.get_config": true,
+ "tfa.layers.normalizations.GroupNormalization.get_input_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_input_mask_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_input_shape_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_losses_for": true,
+ "tfa.layers.normalizations.GroupNormalization.get_output_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_output_mask_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_output_shape_at": true,
+ "tfa.layers.normalizations.GroupNormalization.get_updates_for": true,
+ "tfa.layers.normalizations.GroupNormalization.get_weights": true,
+ "tfa.layers.normalizations.GroupNormalization.input": true,
+ "tfa.layers.normalizations.GroupNormalization.input_mask": true,
+ "tfa.layers.normalizations.GroupNormalization.input_shape": true,
+ "tfa.layers.normalizations.GroupNormalization.input_spec": true,
+ "tfa.layers.normalizations.GroupNormalization.losses": true,
+ "tfa.layers.normalizations.GroupNormalization.metrics": true,
+ "tfa.layers.normalizations.GroupNormalization.name": true,
+ "tfa.layers.normalizations.GroupNormalization.name_scope": true,
+ "tfa.layers.normalizations.GroupNormalization.non_trainable_variables": true,
+ "tfa.layers.normalizations.GroupNormalization.non_trainable_weights": true,
+ "tfa.layers.normalizations.GroupNormalization.output": true,
+ "tfa.layers.normalizations.GroupNormalization.output_mask": true,
+ "tfa.layers.normalizations.GroupNormalization.output_shape": true,
+ "tfa.layers.normalizations.GroupNormalization.set_weights": true,
+ "tfa.layers.normalizations.GroupNormalization.submodules": true,
+ "tfa.layers.normalizations.GroupNormalization.trainable": true,
+ "tfa.layers.normalizations.GroupNormalization.trainable_variables": true,
+ "tfa.layers.normalizations.GroupNormalization.trainable_weights": true,
+ "tfa.layers.normalizations.GroupNormalization.updates": true,
+ "tfa.layers.normalizations.GroupNormalization.variables": true,
+ "tfa.layers.normalizations.GroupNormalization.weights": true,
+ "tfa.layers.normalizations.GroupNormalization.with_name_scope": true,
+ "tfa.layers.normalizations.InstanceNormalization": false,
+ "tfa.layers.normalizations.InstanceNormalization.__call__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__eq__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__ge__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__gt__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__init__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__le__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__lt__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__ne__": true,
+ "tfa.layers.normalizations.InstanceNormalization.__new__": true,
+ "tfa.layers.normalizations.InstanceNormalization.activity_regularizer": true,
+ "tfa.layers.normalizations.InstanceNormalization.add_loss": true,
+ "tfa.layers.normalizations.InstanceNormalization.add_metric": true,
+ "tfa.layers.normalizations.InstanceNormalization.add_update": true,
+ "tfa.layers.normalizations.InstanceNormalization.add_weight": true,
+ "tfa.layers.normalizations.InstanceNormalization.build": true,
+ "tfa.layers.normalizations.InstanceNormalization.call": true,
+ "tfa.layers.normalizations.InstanceNormalization.compute_mask": true,
+ "tfa.layers.normalizations.InstanceNormalization.compute_output_shape": true,
+ "tfa.layers.normalizations.InstanceNormalization.compute_output_signature": true,
+ "tfa.layers.normalizations.InstanceNormalization.count_params": true,
+ "tfa.layers.normalizations.InstanceNormalization.dtype": true,
+ "tfa.layers.normalizations.InstanceNormalization.dynamic": true,
+ "tfa.layers.normalizations.InstanceNormalization.from_config": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_config": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_input_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_input_mask_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_input_shape_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_losses_for": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_output_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_output_mask_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_output_shape_at": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_updates_for": true,
+ "tfa.layers.normalizations.InstanceNormalization.get_weights": true,
+ "tfa.layers.normalizations.InstanceNormalization.input": true,
+ "tfa.layers.normalizations.InstanceNormalization.input_mask": true,
+ "tfa.layers.normalizations.InstanceNormalization.input_shape": true,
+ "tfa.layers.normalizations.InstanceNormalization.input_spec": true,
+ "tfa.layers.normalizations.InstanceNormalization.losses": true,
+ "tfa.layers.normalizations.InstanceNormalization.metrics": true,
+ "tfa.layers.normalizations.InstanceNormalization.name": true,
+ "tfa.layers.normalizations.InstanceNormalization.name_scope": true,
+ "tfa.layers.normalizations.InstanceNormalization.non_trainable_variables": true,
+ "tfa.layers.normalizations.InstanceNormalization.non_trainable_weights": true,
+ "tfa.layers.normalizations.InstanceNormalization.output": true,
+ "tfa.layers.normalizations.InstanceNormalization.output_mask": true,
+ "tfa.layers.normalizations.InstanceNormalization.output_shape": true,
+ "tfa.layers.normalizations.InstanceNormalization.set_weights": true,
+ "tfa.layers.normalizations.InstanceNormalization.submodules": true,
+ "tfa.layers.normalizations.InstanceNormalization.trainable": true,
+ "tfa.layers.normalizations.InstanceNormalization.trainable_variables": true,
+ "tfa.layers.normalizations.InstanceNormalization.trainable_weights": true,
+ "tfa.layers.normalizations.InstanceNormalization.updates": true,
+ "tfa.layers.normalizations.InstanceNormalization.variables": true,
+ "tfa.layers.normalizations.InstanceNormalization.weights": true,
+ "tfa.layers.normalizations.InstanceNormalization.with_name_scope": true,
+ "tfa.layers.normalizations.absolute_import": true,
+ "tfa.layers.normalizations.division": true,
+ "tfa.layers.normalizations.print_function": true,
+ "tfa.layers.optical_flow": false,
+ "tfa.layers.optical_flow.CorrelationCost": false,
+ "tfa.layers.optical_flow.CorrelationCost.__call__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__eq__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__ge__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__gt__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__init__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__le__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__lt__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__ne__": true,
+ "tfa.layers.optical_flow.CorrelationCost.__new__": true,
+ "tfa.layers.optical_flow.CorrelationCost.activity_regularizer": true,
+ "tfa.layers.optical_flow.CorrelationCost.add_loss": true,
+ "tfa.layers.optical_flow.CorrelationCost.add_metric": true,
+ "tfa.layers.optical_flow.CorrelationCost.add_update": true,
+ "tfa.layers.optical_flow.CorrelationCost.add_weight": true,
+ "tfa.layers.optical_flow.CorrelationCost.build": true,
+ "tfa.layers.optical_flow.CorrelationCost.call": true,
+ "tfa.layers.optical_flow.CorrelationCost.compute_mask": true,
+ "tfa.layers.optical_flow.CorrelationCost.compute_output_shape": true,
+ "tfa.layers.optical_flow.CorrelationCost.compute_output_signature": true,
+ "tfa.layers.optical_flow.CorrelationCost.count_params": true,
+ "tfa.layers.optical_flow.CorrelationCost.dtype": true,
+ "tfa.layers.optical_flow.CorrelationCost.dynamic": true,
+ "tfa.layers.optical_flow.CorrelationCost.from_config": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_config": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_input_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_input_mask_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_input_shape_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_losses_for": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_output_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_output_mask_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_output_shape_at": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_updates_for": true,
+ "tfa.layers.optical_flow.CorrelationCost.get_weights": true,
+ "tfa.layers.optical_flow.CorrelationCost.input": true,
+ "tfa.layers.optical_flow.CorrelationCost.input_mask": true,
+ "tfa.layers.optical_flow.CorrelationCost.input_shape": true,
+ "tfa.layers.optical_flow.CorrelationCost.input_spec": true,
+ "tfa.layers.optical_flow.CorrelationCost.losses": true,
+ "tfa.layers.optical_flow.CorrelationCost.metrics": true,
+ "tfa.layers.optical_flow.CorrelationCost.name": true,
+ "tfa.layers.optical_flow.CorrelationCost.name_scope": true,
+ "tfa.layers.optical_flow.CorrelationCost.non_trainable_variables": true,
+ "tfa.layers.optical_flow.CorrelationCost.non_trainable_weights": true,
+ "tfa.layers.optical_flow.CorrelationCost.output": true,
+ "tfa.layers.optical_flow.CorrelationCost.output_mask": true,
+ "tfa.layers.optical_flow.CorrelationCost.output_shape": true,
+ "tfa.layers.optical_flow.CorrelationCost.set_weights": true,
+ "tfa.layers.optical_flow.CorrelationCost.submodules": true,
+ "tfa.layers.optical_flow.CorrelationCost.trainable": true,
+ "tfa.layers.optical_flow.CorrelationCost.trainable_variables": true,
+ "tfa.layers.optical_flow.CorrelationCost.trainable_weights": true,
+ "tfa.layers.optical_flow.CorrelationCost.updates": true,
+ "tfa.layers.optical_flow.CorrelationCost.variables": true,
+ "tfa.layers.optical_flow.CorrelationCost.weights": true,
+ "tfa.layers.optical_flow.CorrelationCost.with_name_scope": true,
+ "tfa.layers.optical_flow.absolute_import": true,
+ "tfa.layers.optical_flow.division": true,
+ "tfa.layers.optical_flow.print_function": true,
+ "tfa.layers.poincare": false,
+ "tfa.layers.poincare.PoincareNormalize": false,
+ "tfa.layers.poincare.PoincareNormalize.__call__": true,
+ "tfa.layers.poincare.PoincareNormalize.__eq__": true,
+ "tfa.layers.poincare.PoincareNormalize.__ge__": true,
+ "tfa.layers.poincare.PoincareNormalize.__gt__": true,
+ "tfa.layers.poincare.PoincareNormalize.__init__": true,
+ "tfa.layers.poincare.PoincareNormalize.__le__": true,
+ "tfa.layers.poincare.PoincareNormalize.__lt__": true,
+ "tfa.layers.poincare.PoincareNormalize.__ne__": true,
+ "tfa.layers.poincare.PoincareNormalize.__new__": true,
+ "tfa.layers.poincare.PoincareNormalize.activity_regularizer": true,
+ "tfa.layers.poincare.PoincareNormalize.add_loss": true,
+ "tfa.layers.poincare.PoincareNormalize.add_metric": true,
+ "tfa.layers.poincare.PoincareNormalize.add_update": true,
+ "tfa.layers.poincare.PoincareNormalize.add_weight": true,
+ "tfa.layers.poincare.PoincareNormalize.build": true,
+ "tfa.layers.poincare.PoincareNormalize.call": true,
+ "tfa.layers.poincare.PoincareNormalize.compute_mask": true,
+ "tfa.layers.poincare.PoincareNormalize.compute_output_shape": true,
+ "tfa.layers.poincare.PoincareNormalize.compute_output_signature": true,
+ "tfa.layers.poincare.PoincareNormalize.count_params": true,
+ "tfa.layers.poincare.PoincareNormalize.dtype": true,
+ "tfa.layers.poincare.PoincareNormalize.dynamic": true,
+ "tfa.layers.poincare.PoincareNormalize.from_config": true,
+ "tfa.layers.poincare.PoincareNormalize.get_config": true,
+ "tfa.layers.poincare.PoincareNormalize.get_input_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_input_mask_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_input_shape_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_losses_for": true,
+ "tfa.layers.poincare.PoincareNormalize.get_output_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_output_mask_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_output_shape_at": true,
+ "tfa.layers.poincare.PoincareNormalize.get_updates_for": true,
+ "tfa.layers.poincare.PoincareNormalize.get_weights": true,
+ "tfa.layers.poincare.PoincareNormalize.input": true,
+ "tfa.layers.poincare.PoincareNormalize.input_mask": true,
+ "tfa.layers.poincare.PoincareNormalize.input_shape": true,
+ "tfa.layers.poincare.PoincareNormalize.input_spec": true,
+ "tfa.layers.poincare.PoincareNormalize.losses": true,
+ "tfa.layers.poincare.PoincareNormalize.metrics": true,
+ "tfa.layers.poincare.PoincareNormalize.name": true,
+ "tfa.layers.poincare.PoincareNormalize.name_scope": true,
+ "tfa.layers.poincare.PoincareNormalize.non_trainable_variables": true,
+ "tfa.layers.poincare.PoincareNormalize.non_trainable_weights": true,
+ "tfa.layers.poincare.PoincareNormalize.output": true,
+ "tfa.layers.poincare.PoincareNormalize.output_mask": true,
+ "tfa.layers.poincare.PoincareNormalize.output_shape": true,
+ "tfa.layers.poincare.PoincareNormalize.set_weights": true,
+ "tfa.layers.poincare.PoincareNormalize.submodules": true,
+ "tfa.layers.poincare.PoincareNormalize.trainable": true,
+ "tfa.layers.poincare.PoincareNormalize.trainable_variables": true,
+ "tfa.layers.poincare.PoincareNormalize.trainable_weights": true,
+ "tfa.layers.poincare.PoincareNormalize.updates": true,
+ "tfa.layers.poincare.PoincareNormalize.variables": true,
+ "tfa.layers.poincare.PoincareNormalize.weights": true,
+ "tfa.layers.poincare.PoincareNormalize.with_name_scope": true,
+ "tfa.layers.poincare.absolute_import": true,
+ "tfa.layers.poincare.division": true,
+ "tfa.layers.poincare.print_function": true,
+ "tfa.layers.print_function": true,
+ "tfa.layers.sparsemax": false,
+ "tfa.layers.sparsemax.Sparsemax": false,
+ "tfa.layers.sparsemax.Sparsemax.__call__": true,
+ "tfa.layers.sparsemax.Sparsemax.__eq__": true,
+ "tfa.layers.sparsemax.Sparsemax.__ge__": true,
+ "tfa.layers.sparsemax.Sparsemax.__gt__": true,
+ "tfa.layers.sparsemax.Sparsemax.__init__": true,
+ "tfa.layers.sparsemax.Sparsemax.__le__": true,
+ "tfa.layers.sparsemax.Sparsemax.__lt__": true,
+ "tfa.layers.sparsemax.Sparsemax.__ne__": true,
+ "tfa.layers.sparsemax.Sparsemax.__new__": true,
+ "tfa.layers.sparsemax.Sparsemax.activity_regularizer": true,
+ "tfa.layers.sparsemax.Sparsemax.add_loss": true,
+ "tfa.layers.sparsemax.Sparsemax.add_metric": true,
+ "tfa.layers.sparsemax.Sparsemax.add_update": true,
+ "tfa.layers.sparsemax.Sparsemax.add_weight": true,
+ "tfa.layers.sparsemax.Sparsemax.build": true,
+ "tfa.layers.sparsemax.Sparsemax.call": true,
+ "tfa.layers.sparsemax.Sparsemax.compute_mask": true,
+ "tfa.layers.sparsemax.Sparsemax.compute_output_shape": true,
+ "tfa.layers.sparsemax.Sparsemax.compute_output_signature": true,
+ "tfa.layers.sparsemax.Sparsemax.count_params": true,
+ "tfa.layers.sparsemax.Sparsemax.dtype": true,
+ "tfa.layers.sparsemax.Sparsemax.dynamic": true,
+ "tfa.layers.sparsemax.Sparsemax.from_config": true,
+ "tfa.layers.sparsemax.Sparsemax.get_config": true,
+ "tfa.layers.sparsemax.Sparsemax.get_input_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_input_mask_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_input_shape_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_losses_for": true,
+ "tfa.layers.sparsemax.Sparsemax.get_output_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_output_mask_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_output_shape_at": true,
+ "tfa.layers.sparsemax.Sparsemax.get_updates_for": true,
+ "tfa.layers.sparsemax.Sparsemax.get_weights": true,
+ "tfa.layers.sparsemax.Sparsemax.input": true,
+ "tfa.layers.sparsemax.Sparsemax.input_mask": true,
+ "tfa.layers.sparsemax.Sparsemax.input_shape": true,
+ "tfa.layers.sparsemax.Sparsemax.input_spec": true,
+ "tfa.layers.sparsemax.Sparsemax.losses": true,
+ "tfa.layers.sparsemax.Sparsemax.metrics": true,
+ "tfa.layers.sparsemax.Sparsemax.name": true,
+ "tfa.layers.sparsemax.Sparsemax.name_scope": true,
+ "tfa.layers.sparsemax.Sparsemax.non_trainable_variables": true,
+ "tfa.layers.sparsemax.Sparsemax.non_trainable_weights": true,
+ "tfa.layers.sparsemax.Sparsemax.output": true,
+ "tfa.layers.sparsemax.Sparsemax.output_mask": true,
+ "tfa.layers.sparsemax.Sparsemax.output_shape": true,
+ "tfa.layers.sparsemax.Sparsemax.set_weights": true,
+ "tfa.layers.sparsemax.Sparsemax.submodules": true,
+ "tfa.layers.sparsemax.Sparsemax.trainable": true,
+ "tfa.layers.sparsemax.Sparsemax.trainable_variables": true,
+ "tfa.layers.sparsemax.Sparsemax.trainable_weights": true,
+ "tfa.layers.sparsemax.Sparsemax.updates": true,
+ "tfa.layers.sparsemax.Sparsemax.variables": true,
+ "tfa.layers.sparsemax.Sparsemax.weights": true,
+ "tfa.layers.sparsemax.Sparsemax.with_name_scope": true,
+ "tfa.layers.sparsemax.absolute_import": true,
+ "tfa.layers.sparsemax.division": true,
+ "tfa.layers.sparsemax.print_function": true,
+ "tfa.layers.sparsemax.sparsemax": false,
+ "tfa.layers.wrappers": false,
+ "tfa.layers.wrappers.WeightNormalization": false,
+ "tfa.layers.wrappers.WeightNormalization.__call__": true,
+ "tfa.layers.wrappers.WeightNormalization.__eq__": true,
+ "tfa.layers.wrappers.WeightNormalization.__ge__": true,
+ "tfa.layers.wrappers.WeightNormalization.__gt__": true,
+ "tfa.layers.wrappers.WeightNormalization.__init__": true,
+ "tfa.layers.wrappers.WeightNormalization.__le__": true,
+ "tfa.layers.wrappers.WeightNormalization.__lt__": true,
+ "tfa.layers.wrappers.WeightNormalization.__ne__": true,
+ "tfa.layers.wrappers.WeightNormalization.__new__": true,
+ "tfa.layers.wrappers.WeightNormalization.activity_regularizer": true,
+ "tfa.layers.wrappers.WeightNormalization.add_loss": true,
+ "tfa.layers.wrappers.WeightNormalization.add_metric": true,
+ "tfa.layers.wrappers.WeightNormalization.add_update": true,
+ "tfa.layers.wrappers.WeightNormalization.add_weight": true,
+ "tfa.layers.wrappers.WeightNormalization.build": true,
+ "tfa.layers.wrappers.WeightNormalization.call": true,
+ "tfa.layers.wrappers.WeightNormalization.compute_mask": true,
+ "tfa.layers.wrappers.WeightNormalization.compute_output_shape": true,
+ "tfa.layers.wrappers.WeightNormalization.compute_output_signature": true,
+ "tfa.layers.wrappers.WeightNormalization.count_params": true,
+ "tfa.layers.wrappers.WeightNormalization.dtype": true,
+ "tfa.layers.wrappers.WeightNormalization.dynamic": true,
+ "tfa.layers.wrappers.WeightNormalization.from_config": true,
+ "tfa.layers.wrappers.WeightNormalization.get_config": true,
+ "tfa.layers.wrappers.WeightNormalization.get_input_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_input_mask_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_input_shape_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_losses_for": true,
+ "tfa.layers.wrappers.WeightNormalization.get_output_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_output_mask_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_output_shape_at": true,
+ "tfa.layers.wrappers.WeightNormalization.get_updates_for": true,
+ "tfa.layers.wrappers.WeightNormalization.get_weights": true,
+ "tfa.layers.wrappers.WeightNormalization.input": true,
+ "tfa.layers.wrappers.WeightNormalization.input_mask": true,
+ "tfa.layers.wrappers.WeightNormalization.input_shape": true,
+ "tfa.layers.wrappers.WeightNormalization.input_spec": true,
+ "tfa.layers.wrappers.WeightNormalization.losses": true,
+ "tfa.layers.wrappers.WeightNormalization.metrics": true,
+ "tfa.layers.wrappers.WeightNormalization.name": true,
+ "tfa.layers.wrappers.WeightNormalization.name_scope": true,
+ "tfa.layers.wrappers.WeightNormalization.non_trainable_variables": true,
+ "tfa.layers.wrappers.WeightNormalization.non_trainable_weights": true,
+ "tfa.layers.wrappers.WeightNormalization.output": true,
+ "tfa.layers.wrappers.WeightNormalization.output_mask": true,
+ "tfa.layers.wrappers.WeightNormalization.output_shape": true,
+ "tfa.layers.wrappers.WeightNormalization.remove": true,
+ "tfa.layers.wrappers.WeightNormalization.set_weights": true,
+ "tfa.layers.wrappers.WeightNormalization.submodules": true,
+ "tfa.layers.wrappers.WeightNormalization.trainable": true,
+ "tfa.layers.wrappers.WeightNormalization.trainable_variables": true,
+ "tfa.layers.wrappers.WeightNormalization.trainable_weights": true,
+ "tfa.layers.wrappers.WeightNormalization.updates": true,
+ "tfa.layers.wrappers.WeightNormalization.variables": true,
+ "tfa.layers.wrappers.WeightNormalization.weights": true,
+ "tfa.layers.wrappers.WeightNormalization.with_name_scope": true,
+ "tfa.layers.wrappers.absolute_import": true,
+ "tfa.layers.wrappers.division": true,
+ "tfa.layers.wrappers.print_function": true,
+ "tfa.losses": false,
+ "tfa.losses.ContrastiveLoss": false,
+ "tfa.losses.ContrastiveLoss.__call__": true,
+ "tfa.losses.ContrastiveLoss.__eq__": true,
+ "tfa.losses.ContrastiveLoss.__ge__": true,
+ "tfa.losses.ContrastiveLoss.__gt__": true,
+ "tfa.losses.ContrastiveLoss.__init__": true,
+ "tfa.losses.ContrastiveLoss.__le__": true,
+ "tfa.losses.ContrastiveLoss.__lt__": true,
+ "tfa.losses.ContrastiveLoss.__ne__": true,
+ "tfa.losses.ContrastiveLoss.__new__": true,
+ "tfa.losses.ContrastiveLoss.call": true,
+ "tfa.losses.ContrastiveLoss.from_config": true,
+ "tfa.losses.ContrastiveLoss.get_config": true,
+ "tfa.losses.GIoULoss": false,
+ "tfa.losses.GIoULoss.__call__": true,
+ "tfa.losses.GIoULoss.__eq__": true,
+ "tfa.losses.GIoULoss.__ge__": true,
+ "tfa.losses.GIoULoss.__gt__": true,
+ "tfa.losses.GIoULoss.__init__": true,
+ "tfa.losses.GIoULoss.__le__": true,
+ "tfa.losses.GIoULoss.__lt__": true,
+ "tfa.losses.GIoULoss.__ne__": true,
+ "tfa.losses.GIoULoss.__new__": true,
+ "tfa.losses.GIoULoss.call": true,
+ "tfa.losses.GIoULoss.from_config": true,
+ "tfa.losses.GIoULoss.get_config": true,
+ "tfa.losses.LiftedStructLoss": false,
+ "tfa.losses.LiftedStructLoss.__call__": true,
+ "tfa.losses.LiftedStructLoss.__eq__": true,
+ "tfa.losses.LiftedStructLoss.__ge__": true,
+ "tfa.losses.LiftedStructLoss.__gt__": true,
+ "tfa.losses.LiftedStructLoss.__init__": true,
+ "tfa.losses.LiftedStructLoss.__le__": true,
+ "tfa.losses.LiftedStructLoss.__lt__": true,
+ "tfa.losses.LiftedStructLoss.__ne__": true,
+ "tfa.losses.LiftedStructLoss.__new__": true,
+ "tfa.losses.LiftedStructLoss.call": true,
+ "tfa.losses.LiftedStructLoss.from_config": true,
+ "tfa.losses.LiftedStructLoss.get_config": true,
+ "tfa.losses.NpairsLoss": false,
+ "tfa.losses.NpairsLoss.__call__": true,
+ "tfa.losses.NpairsLoss.__eq__": true,
+ "tfa.losses.NpairsLoss.__ge__": true,
+ "tfa.losses.NpairsLoss.__gt__": true,
+ "tfa.losses.NpairsLoss.__init__": true,
+ "tfa.losses.NpairsLoss.__le__": true,
+ "tfa.losses.NpairsLoss.__lt__": true,
+ "tfa.losses.NpairsLoss.__ne__": true,
+ "tfa.losses.NpairsLoss.__new__": true,
+ "tfa.losses.NpairsLoss.call": true,
+ "tfa.losses.NpairsLoss.from_config": true,
+ "tfa.losses.NpairsLoss.get_config": true,
+ "tfa.losses.NpairsMultilabelLoss": false,
+ "tfa.losses.NpairsMultilabelLoss.__call__": true,
+ "tfa.losses.NpairsMultilabelLoss.__eq__": true,
+ "tfa.losses.NpairsMultilabelLoss.__ge__": true,
+ "tfa.losses.NpairsMultilabelLoss.__gt__": true,
+ "tfa.losses.NpairsMultilabelLoss.__init__": true,
+ "tfa.losses.NpairsMultilabelLoss.__le__": true,
+ "tfa.losses.NpairsMultilabelLoss.__lt__": true,
+ "tfa.losses.NpairsMultilabelLoss.__ne__": true,
+ "tfa.losses.NpairsMultilabelLoss.__new__": true,
+ "tfa.losses.NpairsMultilabelLoss.call": true,
+ "tfa.losses.NpairsMultilabelLoss.from_config": true,
+ "tfa.losses.NpairsMultilabelLoss.get_config": true,
+ "tfa.losses.SigmoidFocalCrossEntropy": false,
+ "tfa.losses.SigmoidFocalCrossEntropy.__call__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__eq__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__ge__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__gt__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__init__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__le__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__lt__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__ne__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.__new__": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.call": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.from_config": true,
+ "tfa.losses.SigmoidFocalCrossEntropy.get_config": true,
+ "tfa.losses.SparsemaxLoss": false,
+ "tfa.losses.SparsemaxLoss.__call__": true,
+ "tfa.losses.SparsemaxLoss.__eq__": true,
+ "tfa.losses.SparsemaxLoss.__ge__": true,
+ "tfa.losses.SparsemaxLoss.__gt__": true,
+ "tfa.losses.SparsemaxLoss.__init__": true,
+ "tfa.losses.SparsemaxLoss.__le__": true,
+ "tfa.losses.SparsemaxLoss.__lt__": true,
+ "tfa.losses.SparsemaxLoss.__ne__": true,
+ "tfa.losses.SparsemaxLoss.__new__": true,
+ "tfa.losses.SparsemaxLoss.call": true,
+ "tfa.losses.SparsemaxLoss.from_config": true,
+ "tfa.losses.SparsemaxLoss.get_config": true,
+ "tfa.losses.TripletSemiHardLoss": false,
+ "tfa.losses.TripletSemiHardLoss.__call__": true,
+ "tfa.losses.TripletSemiHardLoss.__eq__": true,
+ "tfa.losses.TripletSemiHardLoss.__ge__": true,
+ "tfa.losses.TripletSemiHardLoss.__gt__": true,
+ "tfa.losses.TripletSemiHardLoss.__init__": true,
+ "tfa.losses.TripletSemiHardLoss.__le__": true,
+ "tfa.losses.TripletSemiHardLoss.__lt__": true,
+ "tfa.losses.TripletSemiHardLoss.__ne__": true,
+ "tfa.losses.TripletSemiHardLoss.__new__": true,
+ "tfa.losses.TripletSemiHardLoss.call": true,
+ "tfa.losses.TripletSemiHardLoss.from_config": true,
+ "tfa.losses.TripletSemiHardLoss.get_config": true,
+ "tfa.losses.absolute_import": true,
+ "tfa.losses.contrastive": false,
+ "tfa.losses.contrastive.ContrastiveLoss": false,
+ "tfa.losses.contrastive.ContrastiveLoss.__call__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__eq__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__ge__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__gt__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__init__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__le__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__lt__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__ne__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.__new__": true,
+ "tfa.losses.contrastive.ContrastiveLoss.call": true,
+ "tfa.losses.contrastive.ContrastiveLoss.from_config": true,
+ "tfa.losses.contrastive.ContrastiveLoss.get_config": true,
+ "tfa.losses.contrastive.absolute_import": true,
+ "tfa.losses.contrastive.contrastive_loss": false,
+ "tfa.losses.contrastive.division": true,
+ "tfa.losses.contrastive.print_function": true,
+ "tfa.losses.contrastive_loss": false,
+ "tfa.losses.division": true,
+ "tfa.losses.focal_loss": false,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy": false,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__call__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__eq__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__ge__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__gt__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__init__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__le__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__lt__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__ne__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.__new__": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.call": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.from_config": true,
+ "tfa.losses.focal_loss.SigmoidFocalCrossEntropy.get_config": true,
+ "tfa.losses.focal_loss.absolute_import": true,
+ "tfa.losses.focal_loss.division": true,
+ "tfa.losses.focal_loss.print_function": true,
+ "tfa.losses.focal_loss.sigmoid_focal_crossentropy": false,
+ "tfa.losses.giou_loss": false,
+ "tfa.losses.lifted": false,
+ "tfa.losses.lifted.LiftedStructLoss": false,
+ "tfa.losses.lifted.LiftedStructLoss.__call__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__eq__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__ge__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__gt__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__init__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__le__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__lt__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__ne__": true,
+ "tfa.losses.lifted.LiftedStructLoss.__new__": true,
+ "tfa.losses.lifted.LiftedStructLoss.call": true,
+ "tfa.losses.lifted.LiftedStructLoss.from_config": true,
+ "tfa.losses.lifted.LiftedStructLoss.get_config": true,
+ "tfa.losses.lifted.absolute_import": true,
+ "tfa.losses.lifted.division": true,
+ "tfa.losses.lifted.lifted_struct_loss": false,
+ "tfa.losses.lifted.print_function": true,
+ "tfa.losses.lifted_struct_loss": false,
+ "tfa.losses.metric_learning": false,
+ "tfa.losses.metric_learning.absolute_import": true,
+ "tfa.losses.metric_learning.division": true,
+ "tfa.losses.metric_learning.pairwise_distance": false,
+ "tfa.losses.metric_learning.print_function": true,
+ "tfa.losses.npairs": false,
+ "tfa.losses.npairs.NpairsLoss": false,
+ "tfa.losses.npairs.NpairsLoss.__call__": true,
+ "tfa.losses.npairs.NpairsLoss.__eq__": true,
+ "tfa.losses.npairs.NpairsLoss.__ge__": true,
+ "tfa.losses.npairs.NpairsLoss.__gt__": true,
+ "tfa.losses.npairs.NpairsLoss.__init__": true,
+ "tfa.losses.npairs.NpairsLoss.__le__": true,
+ "tfa.losses.npairs.NpairsLoss.__lt__": true,
+ "tfa.losses.npairs.NpairsLoss.__ne__": true,
+ "tfa.losses.npairs.NpairsLoss.__new__": true,
+ "tfa.losses.npairs.NpairsLoss.call": true,
+ "tfa.losses.npairs.NpairsLoss.from_config": true,
+ "tfa.losses.npairs.NpairsLoss.get_config": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss": false,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__call__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__eq__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__ge__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__gt__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__init__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__le__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__lt__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__ne__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.__new__": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.call": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.from_config": true,
+ "tfa.losses.npairs.NpairsMultilabelLoss.get_config": true,
+ "tfa.losses.npairs.absolute_import": true,
+ "tfa.losses.npairs.division": true,
+ "tfa.losses.npairs.npairs_loss": false,
+ "tfa.losses.npairs.npairs_multilabel_loss": false,
+ "tfa.losses.npairs.print_function": true,
+ "tfa.losses.npairs_loss": false,
+ "tfa.losses.npairs_multilabel_loss": false,
+ "tfa.losses.print_function": true,
+ "tfa.losses.sigmoid_focal_crossentropy": false,
+ "tfa.losses.sparsemax_loss": false,
+ "tfa.losses.triplet": false,
+ "tfa.losses.triplet.TripletSemiHardLoss": false,
+ "tfa.losses.triplet.TripletSemiHardLoss.__call__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__eq__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__ge__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__gt__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__init__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__le__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__lt__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__ne__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.__new__": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.call": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.from_config": true,
+ "tfa.losses.triplet.TripletSemiHardLoss.get_config": true,
+ "tfa.losses.triplet.absolute_import": true,
+ "tfa.losses.triplet.division": true,
+ "tfa.losses.triplet.print_function": true,
+ "tfa.losses.triplet.triplet_semihard_loss": false,
+ "tfa.losses.triplet_semihard_loss": false,
+ "tfa.metrics": false,
+ "tfa.metrics.CohenKappa": false,
+ "tfa.metrics.CohenKappa.__call__": true,
+ "tfa.metrics.CohenKappa.__eq__": true,
+ "tfa.metrics.CohenKappa.__ge__": true,
+ "tfa.metrics.CohenKappa.__gt__": true,
+ "tfa.metrics.CohenKappa.__init__": true,
+ "tfa.metrics.CohenKappa.__le__": true,
+ "tfa.metrics.CohenKappa.__lt__": true,
+ "tfa.metrics.CohenKappa.__ne__": true,
+ "tfa.metrics.CohenKappa.__new__": true,
+ "tfa.metrics.CohenKappa.activity_regularizer": true,
+ "tfa.metrics.CohenKappa.add_loss": true,
+ "tfa.metrics.CohenKappa.add_metric": true,
+ "tfa.metrics.CohenKappa.add_update": true,
+ "tfa.metrics.CohenKappa.add_weight": true,
+ "tfa.metrics.CohenKappa.build": true,
+ "tfa.metrics.CohenKappa.call": true,
+ "tfa.metrics.CohenKappa.compute_mask": true,
+ "tfa.metrics.CohenKappa.compute_output_shape": true,
+ "tfa.metrics.CohenKappa.compute_output_signature": true,
+ "tfa.metrics.CohenKappa.count_params": true,
+ "tfa.metrics.CohenKappa.dtype": true,
+ "tfa.metrics.CohenKappa.dynamic": true,
+ "tfa.metrics.CohenKappa.from_config": true,
+ "tfa.metrics.CohenKappa.get_config": true,
+ "tfa.metrics.CohenKappa.get_input_at": true,
+ "tfa.metrics.CohenKappa.get_input_mask_at": true,
+ "tfa.metrics.CohenKappa.get_input_shape_at": true,
+ "tfa.metrics.CohenKappa.get_losses_for": true,
+ "tfa.metrics.CohenKappa.get_output_at": true,
+ "tfa.metrics.CohenKappa.get_output_mask_at": true,
+ "tfa.metrics.CohenKappa.get_output_shape_at": true,
+ "tfa.metrics.CohenKappa.get_updates_for": true,
+ "tfa.metrics.CohenKappa.get_weights": true,
+ "tfa.metrics.CohenKappa.input": true,
+ "tfa.metrics.CohenKappa.input_mask": true,
+ "tfa.metrics.CohenKappa.input_shape": true,
+ "tfa.metrics.CohenKappa.input_spec": true,
+ "tfa.metrics.CohenKappa.losses": true,
+ "tfa.metrics.CohenKappa.metrics": true,
+ "tfa.metrics.CohenKappa.name": true,
+ "tfa.metrics.CohenKappa.name_scope": true,
+ "tfa.metrics.CohenKappa.non_trainable_variables": true,
+ "tfa.metrics.CohenKappa.non_trainable_weights": true,
+ "tfa.metrics.CohenKappa.output": true,
+ "tfa.metrics.CohenKappa.output_mask": true,
+ "tfa.metrics.CohenKappa.output_shape": true,
+ "tfa.metrics.CohenKappa.reset_states": true,
+ "tfa.metrics.CohenKappa.result": true,
+ "tfa.metrics.CohenKappa.set_weights": true,
+ "tfa.metrics.CohenKappa.submodules": true,
+ "tfa.metrics.CohenKappa.trainable": true,
+ "tfa.metrics.CohenKappa.trainable_variables": true,
+ "tfa.metrics.CohenKappa.trainable_weights": true,
+ "tfa.metrics.CohenKappa.update_state": true,
+ "tfa.metrics.CohenKappa.updates": true,
+ "tfa.metrics.CohenKappa.variables": true,
+ "tfa.metrics.CohenKappa.weights": true,
+ "tfa.metrics.CohenKappa.with_name_scope": true,
+ "tfa.metrics.F1Score": false,
+ "tfa.metrics.F1Score.__call__": true,
+ "tfa.metrics.F1Score.__eq__": true,
+ "tfa.metrics.F1Score.__ge__": true,
+ "tfa.metrics.F1Score.__gt__": true,
+ "tfa.metrics.F1Score.__init__": true,
+ "tfa.metrics.F1Score.__le__": true,
+ "tfa.metrics.F1Score.__lt__": true,
+ "tfa.metrics.F1Score.__ne__": true,
+ "tfa.metrics.F1Score.__new__": true,
+ "tfa.metrics.F1Score.activity_regularizer": true,
+ "tfa.metrics.F1Score.add_loss": true,
+ "tfa.metrics.F1Score.add_metric": true,
+ "tfa.metrics.F1Score.add_update": true,
+ "tfa.metrics.F1Score.add_weight": true,
+ "tfa.metrics.F1Score.build": true,
+ "tfa.metrics.F1Score.call": true,
+ "tfa.metrics.F1Score.compute_mask": true,
+ "tfa.metrics.F1Score.compute_output_shape": true,
+ "tfa.metrics.F1Score.compute_output_signature": true,
+ "tfa.metrics.F1Score.count_params": true,
+ "tfa.metrics.F1Score.dtype": true,
+ "tfa.metrics.F1Score.dynamic": true,
+ "tfa.metrics.F1Score.from_config": true,
+ "tfa.metrics.F1Score.get_config": true,
+ "tfa.metrics.F1Score.get_input_at": true,
+ "tfa.metrics.F1Score.get_input_mask_at": true,
+ "tfa.metrics.F1Score.get_input_shape_at": true,
+ "tfa.metrics.F1Score.get_losses_for": true,
+ "tfa.metrics.F1Score.get_output_at": true,
+ "tfa.metrics.F1Score.get_output_mask_at": true,
+ "tfa.metrics.F1Score.get_output_shape_at": true,
+ "tfa.metrics.F1Score.get_updates_for": true,
+ "tfa.metrics.F1Score.get_weights": true,
+ "tfa.metrics.F1Score.input": true,
+ "tfa.metrics.F1Score.input_mask": true,
+ "tfa.metrics.F1Score.input_shape": true,
+ "tfa.metrics.F1Score.input_spec": true,
+ "tfa.metrics.F1Score.losses": true,
+ "tfa.metrics.F1Score.metrics": true,
+ "tfa.metrics.F1Score.name": true,
+ "tfa.metrics.F1Score.name_scope": true,
+ "tfa.metrics.F1Score.non_trainable_variables": true,
+ "tfa.metrics.F1Score.non_trainable_weights": true,
+ "tfa.metrics.F1Score.output": true,
+ "tfa.metrics.F1Score.output_mask": true,
+ "tfa.metrics.F1Score.output_shape": true,
+ "tfa.metrics.F1Score.reset_states": true,
+ "tfa.metrics.F1Score.result": true,
+ "tfa.metrics.F1Score.set_weights": true,
+ "tfa.metrics.F1Score.submodules": true,
+ "tfa.metrics.F1Score.trainable": true,
+ "tfa.metrics.F1Score.trainable_variables": true,
+ "tfa.metrics.F1Score.trainable_weights": true,
+ "tfa.metrics.F1Score.update_state": true,
+ "tfa.metrics.F1Score.updates": true,
+ "tfa.metrics.F1Score.variables": true,
+ "tfa.metrics.F1Score.weights": true,
+ "tfa.metrics.F1Score.with_name_scope": true,
+ "tfa.metrics.FBetaScore": false,
+ "tfa.metrics.FBetaScore.__call__": true,
+ "tfa.metrics.FBetaScore.__eq__": true,
+ "tfa.metrics.FBetaScore.__ge__": true,
+ "tfa.metrics.FBetaScore.__gt__": true,
+ "tfa.metrics.FBetaScore.__init__": true,
+ "tfa.metrics.FBetaScore.__le__": true,
+ "tfa.metrics.FBetaScore.__lt__": true,
+ "tfa.metrics.FBetaScore.__ne__": true,
+ "tfa.metrics.FBetaScore.__new__": true,
+ "tfa.metrics.FBetaScore.activity_regularizer": true,
+ "tfa.metrics.FBetaScore.add_loss": true,
+ "tfa.metrics.FBetaScore.add_metric": true,
+ "tfa.metrics.FBetaScore.add_update": true,
+ "tfa.metrics.FBetaScore.add_weight": true,
+ "tfa.metrics.FBetaScore.build": true,
+ "tfa.metrics.FBetaScore.call": true,
+ "tfa.metrics.FBetaScore.compute_mask": true,
+ "tfa.metrics.FBetaScore.compute_output_shape": true,
+ "tfa.metrics.FBetaScore.compute_output_signature": true,
+ "tfa.metrics.FBetaScore.count_params": true,
+ "tfa.metrics.FBetaScore.dtype": true,
+ "tfa.metrics.FBetaScore.dynamic": true,
+ "tfa.metrics.FBetaScore.from_config": true,
+ "tfa.metrics.FBetaScore.get_config": true,
+ "tfa.metrics.FBetaScore.get_input_at": true,
+ "tfa.metrics.FBetaScore.get_input_mask_at": true,
+ "tfa.metrics.FBetaScore.get_input_shape_at": true,
+ "tfa.metrics.FBetaScore.get_losses_for": true,
+ "tfa.metrics.FBetaScore.get_output_at": true,
+ "tfa.metrics.FBetaScore.get_output_mask_at": true,
+ "tfa.metrics.FBetaScore.get_output_shape_at": true,
+ "tfa.metrics.FBetaScore.get_updates_for": true,
+ "tfa.metrics.FBetaScore.get_weights": true,
+ "tfa.metrics.FBetaScore.input": true,
+ "tfa.metrics.FBetaScore.input_mask": true,
+ "tfa.metrics.FBetaScore.input_shape": true,
+ "tfa.metrics.FBetaScore.input_spec": true,
+ "tfa.metrics.FBetaScore.losses": true,
+ "tfa.metrics.FBetaScore.metrics": true,
+ "tfa.metrics.FBetaScore.name": true,
+ "tfa.metrics.FBetaScore.name_scope": true,
+ "tfa.metrics.FBetaScore.non_trainable_variables": true,
+ "tfa.metrics.FBetaScore.non_trainable_weights": true,
+ "tfa.metrics.FBetaScore.output": true,
+ "tfa.metrics.FBetaScore.output_mask": true,
+ "tfa.metrics.FBetaScore.output_shape": true,
+ "tfa.metrics.FBetaScore.reset_states": true,
+ "tfa.metrics.FBetaScore.result": true,
+ "tfa.metrics.FBetaScore.set_weights": true,
+ "tfa.metrics.FBetaScore.submodules": true,
+ "tfa.metrics.FBetaScore.trainable": true,
+ "tfa.metrics.FBetaScore.trainable_variables": true,
+ "tfa.metrics.FBetaScore.trainable_weights": true,
+ "tfa.metrics.FBetaScore.update_state": true,
+ "tfa.metrics.FBetaScore.updates": true,
+ "tfa.metrics.FBetaScore.variables": true,
+ "tfa.metrics.FBetaScore.weights": true,
+ "tfa.metrics.FBetaScore.with_name_scope": true,
+ "tfa.metrics.HammingLoss": false,
+ "tfa.metrics.HammingLoss.__call__": true,
+ "tfa.metrics.HammingLoss.__eq__": true,
+ "tfa.metrics.HammingLoss.__ge__": true,
+ "tfa.metrics.HammingLoss.__gt__": true,
+ "tfa.metrics.HammingLoss.__init__": true,
+ "tfa.metrics.HammingLoss.__le__": true,
+ "tfa.metrics.HammingLoss.__lt__": true,
+ "tfa.metrics.HammingLoss.__ne__": true,
+ "tfa.metrics.HammingLoss.__new__": true,
+ "tfa.metrics.HammingLoss.activity_regularizer": true,
+ "tfa.metrics.HammingLoss.add_loss": true,
+ "tfa.metrics.HammingLoss.add_metric": true,
+ "tfa.metrics.HammingLoss.add_update": true,
+ "tfa.metrics.HammingLoss.add_weight": true,
+ "tfa.metrics.HammingLoss.build": true,
+ "tfa.metrics.HammingLoss.call": true,
+ "tfa.metrics.HammingLoss.compute_mask": true,
+ "tfa.metrics.HammingLoss.compute_output_shape": true,
+ "tfa.metrics.HammingLoss.compute_output_signature": true,
+ "tfa.metrics.HammingLoss.count_params": true,
+ "tfa.metrics.HammingLoss.dtype": true,
+ "tfa.metrics.HammingLoss.dynamic": true,
+ "tfa.metrics.HammingLoss.from_config": true,
+ "tfa.metrics.HammingLoss.get_config": true,
+ "tfa.metrics.HammingLoss.get_input_at": true,
+ "tfa.metrics.HammingLoss.get_input_mask_at": true,
+ "tfa.metrics.HammingLoss.get_input_shape_at": true,
+ "tfa.metrics.HammingLoss.get_losses_for": true,
+ "tfa.metrics.HammingLoss.get_output_at": true,
+ "tfa.metrics.HammingLoss.get_output_mask_at": true,
+ "tfa.metrics.HammingLoss.get_output_shape_at": true,
+ "tfa.metrics.HammingLoss.get_updates_for": true,
+ "tfa.metrics.HammingLoss.get_weights": true,
+ "tfa.metrics.HammingLoss.input": true,
+ "tfa.metrics.HammingLoss.input_mask": true,
+ "tfa.metrics.HammingLoss.input_shape": true,
+ "tfa.metrics.HammingLoss.input_spec": true,
+ "tfa.metrics.HammingLoss.losses": true,
+ "tfa.metrics.HammingLoss.metrics": true,
+ "tfa.metrics.HammingLoss.name": true,
+ "tfa.metrics.HammingLoss.name_scope": true,
+ "tfa.metrics.HammingLoss.non_trainable_variables": true,
+ "tfa.metrics.HammingLoss.non_trainable_weights": true,
+ "tfa.metrics.HammingLoss.output": true,
+ "tfa.metrics.HammingLoss.output_mask": true,
+ "tfa.metrics.HammingLoss.output_shape": true,
+ "tfa.metrics.HammingLoss.reset_states": true,
+ "tfa.metrics.HammingLoss.result": true,
+ "tfa.metrics.HammingLoss.set_weights": true,
+ "tfa.metrics.HammingLoss.submodules": true,
+ "tfa.metrics.HammingLoss.trainable": true,
+ "tfa.metrics.HammingLoss.trainable_variables": true,
+ "tfa.metrics.HammingLoss.trainable_weights": true,
+ "tfa.metrics.HammingLoss.update_state": true,
+ "tfa.metrics.HammingLoss.updates": true,
+ "tfa.metrics.HammingLoss.variables": true,
+ "tfa.metrics.HammingLoss.weights": true,
+ "tfa.metrics.HammingLoss.with_name_scope": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient": false,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__call__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__eq__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__ge__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__gt__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__init__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__le__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__lt__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__ne__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.__new__": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.activity_regularizer": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_loss": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_metric": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_update": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.add_weight": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.build": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.call": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_mask": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_output_shape": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.compute_output_signature": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.count_params": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.dtype": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.dynamic": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.from_config": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_config": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_mask_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_input_shape_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_losses_for": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_mask_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_output_shape_at": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_updates_for": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.get_weights": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.input": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_mask": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_shape": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.input_spec": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.losses": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.metrics": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.name": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.name_scope": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.non_trainable_variables": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.non_trainable_weights": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.output": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.output_mask": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.output_shape": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.reset_states": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.result": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.set_weights": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.submodules": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable_variables": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.trainable_weights": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.update_state": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.updates": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.variables": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.weights": true,
+ "tfa.metrics.MatthewsCorrelationCoefficient.with_name_scope": true,
+ "tfa.metrics.MeanMetricWrapper": false,
+ "tfa.metrics.MeanMetricWrapper.__call__": true,
+ "tfa.metrics.MeanMetricWrapper.__eq__": true,
+ "tfa.metrics.MeanMetricWrapper.__ge__": true,
+ "tfa.metrics.MeanMetricWrapper.__gt__": true,
+ "tfa.metrics.MeanMetricWrapper.__init__": true,
+ "tfa.metrics.MeanMetricWrapper.__le__": true,
+ "tfa.metrics.MeanMetricWrapper.__lt__": true,
+ "tfa.metrics.MeanMetricWrapper.__ne__": true,
+ "tfa.metrics.MeanMetricWrapper.__new__": true,
+ "tfa.metrics.MeanMetricWrapper.activity_regularizer": true,
+ "tfa.metrics.MeanMetricWrapper.add_loss": true,
+ "tfa.metrics.MeanMetricWrapper.add_metric": true,
+ "tfa.metrics.MeanMetricWrapper.add_update": true,
+ "tfa.metrics.MeanMetricWrapper.add_weight": true,
+ "tfa.metrics.MeanMetricWrapper.build": true,
+ "tfa.metrics.MeanMetricWrapper.call": true,
+ "tfa.metrics.MeanMetricWrapper.compute_mask": true,
+ "tfa.metrics.MeanMetricWrapper.compute_output_shape": true,
+ "tfa.metrics.MeanMetricWrapper.compute_output_signature": true,
+ "tfa.metrics.MeanMetricWrapper.count_params": true,
+ "tfa.metrics.MeanMetricWrapper.dtype": true,
+ "tfa.metrics.MeanMetricWrapper.dynamic": true,
+ "tfa.metrics.MeanMetricWrapper.from_config": true,
+ "tfa.metrics.MeanMetricWrapper.get_config": true,
+ "tfa.metrics.MeanMetricWrapper.get_input_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_input_mask_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_input_shape_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_losses_for": true,
+ "tfa.metrics.MeanMetricWrapper.get_output_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_output_mask_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_output_shape_at": true,
+ "tfa.metrics.MeanMetricWrapper.get_updates_for": true,
+ "tfa.metrics.MeanMetricWrapper.get_weights": true,
+ "tfa.metrics.MeanMetricWrapper.input": true,
+ "tfa.metrics.MeanMetricWrapper.input_mask": true,
+ "tfa.metrics.MeanMetricWrapper.input_shape": true,
+ "tfa.metrics.MeanMetricWrapper.input_spec": true,
+ "tfa.metrics.MeanMetricWrapper.losses": true,
+ "tfa.metrics.MeanMetricWrapper.metrics": true,
+ "tfa.metrics.MeanMetricWrapper.name": true,
+ "tfa.metrics.MeanMetricWrapper.name_scope": true,
+ "tfa.metrics.MeanMetricWrapper.non_trainable_variables": true,
+ "tfa.metrics.MeanMetricWrapper.non_trainable_weights": true,
+ "tfa.metrics.MeanMetricWrapper.output": true,
+ "tfa.metrics.MeanMetricWrapper.output_mask": true,
+ "tfa.metrics.MeanMetricWrapper.output_shape": true,
+ "tfa.metrics.MeanMetricWrapper.reset_states": true,
+ "tfa.metrics.MeanMetricWrapper.result": true,
+ "tfa.metrics.MeanMetricWrapper.set_weights": true,
+ "tfa.metrics.MeanMetricWrapper.submodules": true,
+ "tfa.metrics.MeanMetricWrapper.trainable": true,
+ "tfa.metrics.MeanMetricWrapper.trainable_variables": true,
+ "tfa.metrics.MeanMetricWrapper.trainable_weights": true,
+ "tfa.metrics.MeanMetricWrapper.update_state": true,
+ "tfa.metrics.MeanMetricWrapper.updates": true,
+ "tfa.metrics.MeanMetricWrapper.variables": true,
+ "tfa.metrics.MeanMetricWrapper.weights": true,
+ "tfa.metrics.MeanMetricWrapper.with_name_scope": true,
+ "tfa.metrics.MultiLabelConfusionMatrix": false,
+ "tfa.metrics.MultiLabelConfusionMatrix.__call__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__eq__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__ge__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__gt__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__init__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__le__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__lt__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__ne__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.__new__": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.activity_regularizer": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.add_loss": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.add_metric": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.add_update": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.add_weight": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.build": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.call": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_mask": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_output_shape": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.compute_output_signature": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.count_params": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.dtype": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.dynamic": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.from_config": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_config": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_mask_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_input_shape_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_losses_for": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_mask_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_output_shape_at": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_updates_for": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.get_weights": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.input": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.input_mask": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.input_shape": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.input_spec": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.losses": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.metrics": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.name": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.name_scope": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.non_trainable_variables": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.non_trainable_weights": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.output": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.output_mask": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.output_shape": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.reset_states": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.result": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.set_weights": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.submodules": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable_variables": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.trainable_weights": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.update_state": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.updates": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.variables": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.weights": true,
+ "tfa.metrics.MultiLabelConfusionMatrix.with_name_scope": true,
+ "tfa.metrics.RSquare": false,
+ "tfa.metrics.RSquare.__call__": true,
+ "tfa.metrics.RSquare.__eq__": true,
+ "tfa.metrics.RSquare.__ge__": true,
+ "tfa.metrics.RSquare.__gt__": true,
+ "tfa.metrics.RSquare.__init__": true,
+ "tfa.metrics.RSquare.__le__": true,
+ "tfa.metrics.RSquare.__lt__": true,
+ "tfa.metrics.RSquare.__ne__": true,
+ "tfa.metrics.RSquare.__new__": true,
+ "tfa.metrics.RSquare.activity_regularizer": true,
+ "tfa.metrics.RSquare.add_loss": true,
+ "tfa.metrics.RSquare.add_metric": true,
+ "tfa.metrics.RSquare.add_update": true,
+ "tfa.metrics.RSquare.add_weight": true,
+ "tfa.metrics.RSquare.build": true,
+ "tfa.metrics.RSquare.call": true,
+ "tfa.metrics.RSquare.compute_mask": true,
+ "tfa.metrics.RSquare.compute_output_shape": true,
+ "tfa.metrics.RSquare.compute_output_signature": true,
+ "tfa.metrics.RSquare.count_params": true,
+ "tfa.metrics.RSquare.dtype": true,
+ "tfa.metrics.RSquare.dynamic": true,
+ "tfa.metrics.RSquare.from_config": true,
+ "tfa.metrics.RSquare.get_config": true,
+ "tfa.metrics.RSquare.get_input_at": true,
+ "tfa.metrics.RSquare.get_input_mask_at": true,
+ "tfa.metrics.RSquare.get_input_shape_at": true,
+ "tfa.metrics.RSquare.get_losses_for": true,
+ "tfa.metrics.RSquare.get_output_at": true,
+ "tfa.metrics.RSquare.get_output_mask_at": true,
+ "tfa.metrics.RSquare.get_output_shape_at": true,
+ "tfa.metrics.RSquare.get_updates_for": true,
+ "tfa.metrics.RSquare.get_weights": true,
+ "tfa.metrics.RSquare.input": true,
+ "tfa.metrics.RSquare.input_mask": true,
+ "tfa.metrics.RSquare.input_shape": true,
+ "tfa.metrics.RSquare.input_spec": true,
+ "tfa.metrics.RSquare.losses": true,
+ "tfa.metrics.RSquare.metrics": true,
+ "tfa.metrics.RSquare.name": true,
+ "tfa.metrics.RSquare.name_scope": true,
+ "tfa.metrics.RSquare.non_trainable_variables": true,
+ "tfa.metrics.RSquare.non_trainable_weights": true,
+ "tfa.metrics.RSquare.output": true,
+ "tfa.metrics.RSquare.output_mask": true,
+ "tfa.metrics.RSquare.output_shape": true,
+ "tfa.metrics.RSquare.reset_states": true,
+ "tfa.metrics.RSquare.result": true,
+ "tfa.metrics.RSquare.set_weights": true,
+ "tfa.metrics.RSquare.submodules": true,
+ "tfa.metrics.RSquare.trainable": true,
+ "tfa.metrics.RSquare.trainable_variables": true,
+ "tfa.metrics.RSquare.trainable_weights": true,
+ "tfa.metrics.RSquare.update_state": true,
+ "tfa.metrics.RSquare.updates": true,
+ "tfa.metrics.RSquare.variables": true,
+ "tfa.metrics.RSquare.weights": true,
+ "tfa.metrics.RSquare.with_name_scope": true,
+ "tfa.metrics.absolute_import": true,
+ "tfa.metrics.cohens_kappa": false,
+ "tfa.metrics.cohens_kappa.CohenKappa": false,
+ "tfa.metrics.cohens_kappa.CohenKappa.__call__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__eq__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__ge__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__gt__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__init__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__le__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__lt__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__ne__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.__new__": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.activity_regularizer": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.add_loss": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.add_metric": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.add_update": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.add_weight": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.build": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.call": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_mask": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_output_shape": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.compute_output_signature": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.count_params": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.dtype": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.dynamic": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.from_config": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_config": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_mask_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_input_shape_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_losses_for": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_mask_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_output_shape_at": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_updates_for": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.get_weights": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.input": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.input_mask": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.input_shape": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.input_spec": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.losses": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.metrics": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.name": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.name_scope": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.non_trainable_variables": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.non_trainable_weights": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.output": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.output_mask": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.output_shape": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.reset_states": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.result": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.set_weights": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.submodules": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable_variables": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.trainable_weights": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.update_state": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.updates": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.variables": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.weights": true,
+ "tfa.metrics.cohens_kappa.CohenKappa.with_name_scope": true,
+ "tfa.metrics.cohens_kappa.absolute_import": true,
+ "tfa.metrics.cohens_kappa.division": true,
+ "tfa.metrics.cohens_kappa.print_function": true,
+ "tfa.metrics.division": true,
+ "tfa.metrics.f_scores": false,
+ "tfa.metrics.f_scores.F1Score": false,
+ "tfa.metrics.f_scores.F1Score.__call__": true,
+ "tfa.metrics.f_scores.F1Score.__eq__": true,
+ "tfa.metrics.f_scores.F1Score.__ge__": true,
+ "tfa.metrics.f_scores.F1Score.__gt__": true,
+ "tfa.metrics.f_scores.F1Score.__init__": true,
+ "tfa.metrics.f_scores.F1Score.__le__": true,
+ "tfa.metrics.f_scores.F1Score.__lt__": true,
+ "tfa.metrics.f_scores.F1Score.__ne__": true,
+ "tfa.metrics.f_scores.F1Score.__new__": true,
+ "tfa.metrics.f_scores.F1Score.activity_regularizer": true,
+ "tfa.metrics.f_scores.F1Score.add_loss": true,
+ "tfa.metrics.f_scores.F1Score.add_metric": true,
+ "tfa.metrics.f_scores.F1Score.add_update": true,
+ "tfa.metrics.f_scores.F1Score.add_weight": true,
+ "tfa.metrics.f_scores.F1Score.build": true,
+ "tfa.metrics.f_scores.F1Score.call": true,
+ "tfa.metrics.f_scores.F1Score.compute_mask": true,
+ "tfa.metrics.f_scores.F1Score.compute_output_shape": true,
+ "tfa.metrics.f_scores.F1Score.compute_output_signature": true,
+ "tfa.metrics.f_scores.F1Score.count_params": true,
+ "tfa.metrics.f_scores.F1Score.dtype": true,
+ "tfa.metrics.f_scores.F1Score.dynamic": true,
+ "tfa.metrics.f_scores.F1Score.from_config": true,
+ "tfa.metrics.f_scores.F1Score.get_config": true,
+ "tfa.metrics.f_scores.F1Score.get_input_at": true,
+ "tfa.metrics.f_scores.F1Score.get_input_mask_at": true,
+ "tfa.metrics.f_scores.F1Score.get_input_shape_at": true,
+ "tfa.metrics.f_scores.F1Score.get_losses_for": true,
+ "tfa.metrics.f_scores.F1Score.get_output_at": true,
+ "tfa.metrics.f_scores.F1Score.get_output_mask_at": true,
+ "tfa.metrics.f_scores.F1Score.get_output_shape_at": true,
+ "tfa.metrics.f_scores.F1Score.get_updates_for": true,
+ "tfa.metrics.f_scores.F1Score.get_weights": true,
+ "tfa.metrics.f_scores.F1Score.input": true,
+ "tfa.metrics.f_scores.F1Score.input_mask": true,
+ "tfa.metrics.f_scores.F1Score.input_shape": true,
+ "tfa.metrics.f_scores.F1Score.input_spec": true,
+ "tfa.metrics.f_scores.F1Score.losses": true,
+ "tfa.metrics.f_scores.F1Score.metrics": true,
+ "tfa.metrics.f_scores.F1Score.name": true,
+ "tfa.metrics.f_scores.F1Score.name_scope": true,
+ "tfa.metrics.f_scores.F1Score.non_trainable_variables": true,
+ "tfa.metrics.f_scores.F1Score.non_trainable_weights": true,
+ "tfa.metrics.f_scores.F1Score.output": true,
+ "tfa.metrics.f_scores.F1Score.output_mask": true,
+ "tfa.metrics.f_scores.F1Score.output_shape": true,
+ "tfa.metrics.f_scores.F1Score.reset_states": true,
+ "tfa.metrics.f_scores.F1Score.result": true,
+ "tfa.metrics.f_scores.F1Score.set_weights": true,
+ "tfa.metrics.f_scores.F1Score.submodules": true,
+ "tfa.metrics.f_scores.F1Score.trainable": true,
+ "tfa.metrics.f_scores.F1Score.trainable_variables": true,
+ "tfa.metrics.f_scores.F1Score.trainable_weights": true,
+ "tfa.metrics.f_scores.F1Score.update_state": true,
+ "tfa.metrics.f_scores.F1Score.updates": true,
+ "tfa.metrics.f_scores.F1Score.variables": true,
+ "tfa.metrics.f_scores.F1Score.weights": true,
+ "tfa.metrics.f_scores.F1Score.with_name_scope": true,
+ "tfa.metrics.f_scores.FBetaScore": false,
+ "tfa.metrics.f_scores.FBetaScore.__call__": true,
+ "tfa.metrics.f_scores.FBetaScore.__eq__": true,
+ "tfa.metrics.f_scores.FBetaScore.__ge__": true,
+ "tfa.metrics.f_scores.FBetaScore.__gt__": true,
+ "tfa.metrics.f_scores.FBetaScore.__init__": true,
+ "tfa.metrics.f_scores.FBetaScore.__le__": true,
+ "tfa.metrics.f_scores.FBetaScore.__lt__": true,
+ "tfa.metrics.f_scores.FBetaScore.__ne__": true,
+ "tfa.metrics.f_scores.FBetaScore.__new__": true,
+ "tfa.metrics.f_scores.FBetaScore.activity_regularizer": true,
+ "tfa.metrics.f_scores.FBetaScore.add_loss": true,
+ "tfa.metrics.f_scores.FBetaScore.add_metric": true,
+ "tfa.metrics.f_scores.FBetaScore.add_update": true,
+ "tfa.metrics.f_scores.FBetaScore.add_weight": true,
+ "tfa.metrics.f_scores.FBetaScore.build": true,
+ "tfa.metrics.f_scores.FBetaScore.call": true,
+ "tfa.metrics.f_scores.FBetaScore.compute_mask": true,
+ "tfa.metrics.f_scores.FBetaScore.compute_output_shape": true,
+ "tfa.metrics.f_scores.FBetaScore.compute_output_signature": true,
+ "tfa.metrics.f_scores.FBetaScore.count_params": true,
+ "tfa.metrics.f_scores.FBetaScore.dtype": true,
+ "tfa.metrics.f_scores.FBetaScore.dynamic": true,
+ "tfa.metrics.f_scores.FBetaScore.from_config": true,
+ "tfa.metrics.f_scores.FBetaScore.get_config": true,
+ "tfa.metrics.f_scores.FBetaScore.get_input_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_input_mask_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_input_shape_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_losses_for": true,
+ "tfa.metrics.f_scores.FBetaScore.get_output_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_output_mask_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_output_shape_at": true,
+ "tfa.metrics.f_scores.FBetaScore.get_updates_for": true,
+ "tfa.metrics.f_scores.FBetaScore.get_weights": true,
+ "tfa.metrics.f_scores.FBetaScore.input": true,
+ "tfa.metrics.f_scores.FBetaScore.input_mask": true,
+ "tfa.metrics.f_scores.FBetaScore.input_shape": true,
+ "tfa.metrics.f_scores.FBetaScore.input_spec": true,
+ "tfa.metrics.f_scores.FBetaScore.losses": true,
+ "tfa.metrics.f_scores.FBetaScore.metrics": true,
+ "tfa.metrics.f_scores.FBetaScore.name": true,
+ "tfa.metrics.f_scores.FBetaScore.name_scope": true,
+ "tfa.metrics.f_scores.FBetaScore.non_trainable_variables": true,
+ "tfa.metrics.f_scores.FBetaScore.non_trainable_weights": true,
+ "tfa.metrics.f_scores.FBetaScore.output": true,
+ "tfa.metrics.f_scores.FBetaScore.output_mask": true,
+ "tfa.metrics.f_scores.FBetaScore.output_shape": true,
+ "tfa.metrics.f_scores.FBetaScore.reset_states": true,
+ "tfa.metrics.f_scores.FBetaScore.result": true,
+ "tfa.metrics.f_scores.FBetaScore.set_weights": true,
+ "tfa.metrics.f_scores.FBetaScore.submodules": true,
+ "tfa.metrics.f_scores.FBetaScore.trainable": true,
+ "tfa.metrics.f_scores.FBetaScore.trainable_variables": true,
+ "tfa.metrics.f_scores.FBetaScore.trainable_weights": true,
+ "tfa.metrics.f_scores.FBetaScore.update_state": true,
+ "tfa.metrics.f_scores.FBetaScore.updates": true,
+ "tfa.metrics.f_scores.FBetaScore.variables": true,
+ "tfa.metrics.f_scores.FBetaScore.weights": true,
+ "tfa.metrics.f_scores.FBetaScore.with_name_scope": true,
+ "tfa.metrics.f_scores.absolute_import": true,
+ "tfa.metrics.f_scores.division": true,
+ "tfa.metrics.f_scores.print_function": true,
+ "tfa.metrics.hamming": false,
+ "tfa.metrics.hamming.HammingLoss": false,
+ "tfa.metrics.hamming.HammingLoss.__call__": true,
+ "tfa.metrics.hamming.HammingLoss.__eq__": true,
+ "tfa.metrics.hamming.HammingLoss.__ge__": true,
+ "tfa.metrics.hamming.HammingLoss.__gt__": true,
+ "tfa.metrics.hamming.HammingLoss.__init__": true,
+ "tfa.metrics.hamming.HammingLoss.__le__": true,
+ "tfa.metrics.hamming.HammingLoss.__lt__": true,
+ "tfa.metrics.hamming.HammingLoss.__ne__": true,
+ "tfa.metrics.hamming.HammingLoss.__new__": true,
+ "tfa.metrics.hamming.HammingLoss.activity_regularizer": true,
+ "tfa.metrics.hamming.HammingLoss.add_loss": true,
+ "tfa.metrics.hamming.HammingLoss.add_metric": true,
+ "tfa.metrics.hamming.HammingLoss.add_update": true,
+ "tfa.metrics.hamming.HammingLoss.add_weight": true,
+ "tfa.metrics.hamming.HammingLoss.build": true,
+ "tfa.metrics.hamming.HammingLoss.call": true,
+ "tfa.metrics.hamming.HammingLoss.compute_mask": true,
+ "tfa.metrics.hamming.HammingLoss.compute_output_shape": true,
+ "tfa.metrics.hamming.HammingLoss.compute_output_signature": true,
+ "tfa.metrics.hamming.HammingLoss.count_params": true,
+ "tfa.metrics.hamming.HammingLoss.dtype": true,
+ "tfa.metrics.hamming.HammingLoss.dynamic": true,
+ "tfa.metrics.hamming.HammingLoss.from_config": true,
+ "tfa.metrics.hamming.HammingLoss.get_config": true,
+ "tfa.metrics.hamming.HammingLoss.get_input_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_input_mask_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_input_shape_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_losses_for": true,
+ "tfa.metrics.hamming.HammingLoss.get_output_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_output_mask_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_output_shape_at": true,
+ "tfa.metrics.hamming.HammingLoss.get_updates_for": true,
+ "tfa.metrics.hamming.HammingLoss.get_weights": true,
+ "tfa.metrics.hamming.HammingLoss.input": true,
+ "tfa.metrics.hamming.HammingLoss.input_mask": true,
+ "tfa.metrics.hamming.HammingLoss.input_shape": true,
+ "tfa.metrics.hamming.HammingLoss.input_spec": true,
+ "tfa.metrics.hamming.HammingLoss.losses": true,
+ "tfa.metrics.hamming.HammingLoss.metrics": true,
+ "tfa.metrics.hamming.HammingLoss.name": true,
+ "tfa.metrics.hamming.HammingLoss.name_scope": true,
+ "tfa.metrics.hamming.HammingLoss.non_trainable_variables": true,
+ "tfa.metrics.hamming.HammingLoss.non_trainable_weights": true,
+ "tfa.metrics.hamming.HammingLoss.output": true,
+ "tfa.metrics.hamming.HammingLoss.output_mask": true,
+ "tfa.metrics.hamming.HammingLoss.output_shape": true,
+ "tfa.metrics.hamming.HammingLoss.reset_states": true,
+ "tfa.metrics.hamming.HammingLoss.result": true,
+ "tfa.metrics.hamming.HammingLoss.set_weights": true,
+ "tfa.metrics.hamming.HammingLoss.submodules": true,
+ "tfa.metrics.hamming.HammingLoss.trainable": true,
+ "tfa.metrics.hamming.HammingLoss.trainable_variables": true,
+ "tfa.metrics.hamming.HammingLoss.trainable_weights": true,
+ "tfa.metrics.hamming.HammingLoss.update_state": true,
+ "tfa.metrics.hamming.HammingLoss.updates": true,
+ "tfa.metrics.hamming.HammingLoss.variables": true,
+ "tfa.metrics.hamming.HammingLoss.weights": true,
+ "tfa.metrics.hamming.HammingLoss.with_name_scope": true,
+ "tfa.metrics.hamming.absolute_import": true,
+ "tfa.metrics.hamming.division": true,
+ "tfa.metrics.hamming.hamming_distance": false,
+ "tfa.metrics.hamming.hamming_loss_fn": false,
+ "tfa.metrics.hamming.print_function": true,
+ "tfa.metrics.hamming_distance": false,
+ "tfa.metrics.matthews_correlation_coefficient": false,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient": false,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__call__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__eq__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__ge__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__gt__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__init__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__le__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__lt__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__ne__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.__new__": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.activity_regularizer": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_loss": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_metric": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_update": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.add_weight": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.build": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.call": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_mask": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_output_shape": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.compute_output_signature": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.count_params": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.dtype": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.dynamic": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.from_config": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_config": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_mask_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_input_shape_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_losses_for": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_mask_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_output_shape_at": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_updates_for": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.get_weights": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_mask": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_shape": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.input_spec": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.losses": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.metrics": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.name": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.name_scope": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.non_trainable_variables": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.non_trainable_weights": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output_mask": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.output_shape": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.reset_states": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.result": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.set_weights": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.submodules": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable_variables": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.trainable_weights": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.update_state": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.updates": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.variables": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.weights": true,
+ "tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient.with_name_scope": true,
+ "tfa.metrics.matthews_correlation_coefficient.absolute_import": true,
+ "tfa.metrics.matthews_correlation_coefficient.division": true,
+ "tfa.metrics.matthews_correlation_coefficient.print_function": true,
+ "tfa.metrics.multilabel_confusion_matrix": false,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix": false,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__call__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__eq__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__ge__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__gt__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__init__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__le__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__lt__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__ne__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.__new__": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.activity_regularizer": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_loss": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_metric": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_update": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.add_weight": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.build": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.call": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_mask": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_output_shape": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.compute_output_signature": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.count_params": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.dtype": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.dynamic": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.from_config": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_config": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_mask_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_input_shape_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_losses_for": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_mask_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_output_shape_at": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_updates_for": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.get_weights": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_mask": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_shape": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.input_spec": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.losses": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.metrics": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.name": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.name_scope": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.non_trainable_variables": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.non_trainable_weights": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output_mask": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.output_shape": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.reset_states": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.result": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.set_weights": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.submodules": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable_variables": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.trainable_weights": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.update_state": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.updates": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.variables": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.weights": true,
+ "tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix.with_name_scope": true,
+ "tfa.metrics.multilabel_confusion_matrix.absolute_import": true,
+ "tfa.metrics.multilabel_confusion_matrix.division": true,
+ "tfa.metrics.multilabel_confusion_matrix.print_function": true,
+ "tfa.metrics.print_function": true,
+ "tfa.metrics.r_square": false,
+ "tfa.metrics.r_square.RSquare": false,
+ "tfa.metrics.r_square.RSquare.__call__": true,
+ "tfa.metrics.r_square.RSquare.__eq__": true,
+ "tfa.metrics.r_square.RSquare.__ge__": true,
+ "tfa.metrics.r_square.RSquare.__gt__": true,
+ "tfa.metrics.r_square.RSquare.__init__": true,
+ "tfa.metrics.r_square.RSquare.__le__": true,
+ "tfa.metrics.r_square.RSquare.__lt__": true,
+ "tfa.metrics.r_square.RSquare.__ne__": true,
+ "tfa.metrics.r_square.RSquare.__new__": true,
+ "tfa.metrics.r_square.RSquare.activity_regularizer": true,
+ "tfa.metrics.r_square.RSquare.add_loss": true,
+ "tfa.metrics.r_square.RSquare.add_metric": true,
+ "tfa.metrics.r_square.RSquare.add_update": true,
+ "tfa.metrics.r_square.RSquare.add_weight": true,
+ "tfa.metrics.r_square.RSquare.build": true,
+ "tfa.metrics.r_square.RSquare.call": true,
+ "tfa.metrics.r_square.RSquare.compute_mask": true,
+ "tfa.metrics.r_square.RSquare.compute_output_shape": true,
+ "tfa.metrics.r_square.RSquare.compute_output_signature": true,
+ "tfa.metrics.r_square.RSquare.count_params": true,
+ "tfa.metrics.r_square.RSquare.dtype": true,
+ "tfa.metrics.r_square.RSquare.dynamic": true,
+ "tfa.metrics.r_square.RSquare.from_config": true,
+ "tfa.metrics.r_square.RSquare.get_config": true,
+ "tfa.metrics.r_square.RSquare.get_input_at": true,
+ "tfa.metrics.r_square.RSquare.get_input_mask_at": true,
+ "tfa.metrics.r_square.RSquare.get_input_shape_at": true,
+ "tfa.metrics.r_square.RSquare.get_losses_for": true,
+ "tfa.metrics.r_square.RSquare.get_output_at": true,
+ "tfa.metrics.r_square.RSquare.get_output_mask_at": true,
+ "tfa.metrics.r_square.RSquare.get_output_shape_at": true,
+ "tfa.metrics.r_square.RSquare.get_updates_for": true,
+ "tfa.metrics.r_square.RSquare.get_weights": true,
+ "tfa.metrics.r_square.RSquare.input": true,
+ "tfa.metrics.r_square.RSquare.input_mask": true,
+ "tfa.metrics.r_square.RSquare.input_shape": true,
+ "tfa.metrics.r_square.RSquare.input_spec": true,
+ "tfa.metrics.r_square.RSquare.losses": true,
+ "tfa.metrics.r_square.RSquare.metrics": true,
+ "tfa.metrics.r_square.RSquare.name": true,
+ "tfa.metrics.r_square.RSquare.name_scope": true,
+ "tfa.metrics.r_square.RSquare.non_trainable_variables": true,
+ "tfa.metrics.r_square.RSquare.non_trainable_weights": true,
+ "tfa.metrics.r_square.RSquare.output": true,
+ "tfa.metrics.r_square.RSquare.output_mask": true,
+ "tfa.metrics.r_square.RSquare.output_shape": true,
+ "tfa.metrics.r_square.RSquare.reset_states": true,
+ "tfa.metrics.r_square.RSquare.result": true,
+ "tfa.metrics.r_square.RSquare.set_weights": true,
+ "tfa.metrics.r_square.RSquare.submodules": true,
+ "tfa.metrics.r_square.RSquare.trainable": true,
+ "tfa.metrics.r_square.RSquare.trainable_variables": true,
+ "tfa.metrics.r_square.RSquare.trainable_weights": true,
+ "tfa.metrics.r_square.RSquare.update_state": true,
+ "tfa.metrics.r_square.RSquare.updates": true,
+ "tfa.metrics.r_square.RSquare.variables": true,
+ "tfa.metrics.r_square.RSquare.weights": true,
+ "tfa.metrics.r_square.RSquare.with_name_scope": true,
+ "tfa.metrics.r_square.absolute_import": true,
+ "tfa.metrics.r_square.division": true,
+ "tfa.metrics.r_square.print_function": true,
+ "tfa.metrics.utils": false,
+ "tfa.metrics.utils.MeanMetricWrapper": false,
+ "tfa.metrics.utils.MeanMetricWrapper.__call__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__eq__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__ge__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__gt__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__init__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__le__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__lt__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__ne__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.__new__": true,
+ "tfa.metrics.utils.MeanMetricWrapper.activity_regularizer": true,
+ "tfa.metrics.utils.MeanMetricWrapper.add_loss": true,
+ "tfa.metrics.utils.MeanMetricWrapper.add_metric": true,
+ "tfa.metrics.utils.MeanMetricWrapper.add_update": true,
+ "tfa.metrics.utils.MeanMetricWrapper.add_weight": true,
+ "tfa.metrics.utils.MeanMetricWrapper.build": true,
+ "tfa.metrics.utils.MeanMetricWrapper.call": true,
+ "tfa.metrics.utils.MeanMetricWrapper.compute_mask": true,
+ "tfa.metrics.utils.MeanMetricWrapper.compute_output_shape": true,
+ "tfa.metrics.utils.MeanMetricWrapper.compute_output_signature": true,
+ "tfa.metrics.utils.MeanMetricWrapper.count_params": true,
+ "tfa.metrics.utils.MeanMetricWrapper.dtype": true,
+ "tfa.metrics.utils.MeanMetricWrapper.dynamic": true,
+ "tfa.metrics.utils.MeanMetricWrapper.from_config": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_config": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_mask_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_input_shape_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_losses_for": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_mask_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_output_shape_at": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_updates_for": true,
+ "tfa.metrics.utils.MeanMetricWrapper.get_weights": true,
+ "tfa.metrics.utils.MeanMetricWrapper.input": true,
+ "tfa.metrics.utils.MeanMetricWrapper.input_mask": true,
+ "tfa.metrics.utils.MeanMetricWrapper.input_shape": true,
+ "tfa.metrics.utils.MeanMetricWrapper.input_spec": true,
+ "tfa.metrics.utils.MeanMetricWrapper.losses": true,
+ "tfa.metrics.utils.MeanMetricWrapper.metrics": true,
+ "tfa.metrics.utils.MeanMetricWrapper.name": true,
+ "tfa.metrics.utils.MeanMetricWrapper.name_scope": true,
+ "tfa.metrics.utils.MeanMetricWrapper.non_trainable_variables": true,
+ "tfa.metrics.utils.MeanMetricWrapper.non_trainable_weights": true,
+ "tfa.metrics.utils.MeanMetricWrapper.output": true,
+ "tfa.metrics.utils.MeanMetricWrapper.output_mask": true,
+ "tfa.metrics.utils.MeanMetricWrapper.output_shape": true,
+ "tfa.metrics.utils.MeanMetricWrapper.reset_states": true,
+ "tfa.metrics.utils.MeanMetricWrapper.result": true,
+ "tfa.metrics.utils.MeanMetricWrapper.set_weights": true,
+ "tfa.metrics.utils.MeanMetricWrapper.submodules": true,
+ "tfa.metrics.utils.MeanMetricWrapper.trainable": true,
+ "tfa.metrics.utils.MeanMetricWrapper.trainable_variables": true,
+ "tfa.metrics.utils.MeanMetricWrapper.trainable_weights": true,
+ "tfa.metrics.utils.MeanMetricWrapper.update_state": true,
+ "tfa.metrics.utils.MeanMetricWrapper.updates": true,
+ "tfa.metrics.utils.MeanMetricWrapper.variables": true,
+ "tfa.metrics.utils.MeanMetricWrapper.weights": true,
+ "tfa.metrics.utils.MeanMetricWrapper.with_name_scope": true,
+ "tfa.metrics.utils.absolute_import": true,
+ "tfa.metrics.utils.division": true,
+ "tfa.metrics.utils.print_function": true,
+ "tfa.optimizers": false,
+ "tfa.optimizers.AdamW": false,
+ "tfa.optimizers.AdamW.__eq__": true,
+ "tfa.optimizers.AdamW.__ge__": true,
+ "tfa.optimizers.AdamW.__gt__": true,
+ "tfa.optimizers.AdamW.__init__": true,
+ "tfa.optimizers.AdamW.__le__": true,
+ "tfa.optimizers.AdamW.__lt__": true,
+ "tfa.optimizers.AdamW.__ne__": true,
+ "tfa.optimizers.AdamW.__new__": true,
+ "tfa.optimizers.AdamW.add_slot": true,
+ "tfa.optimizers.AdamW.add_weight": true,
+ "tfa.optimizers.AdamW.apply_gradients": true,
+ "tfa.optimizers.AdamW.from_config": true,
+ "tfa.optimizers.AdamW.get_config": true,
+ "tfa.optimizers.AdamW.get_gradients": true,
+ "tfa.optimizers.AdamW.get_slot": true,
+ "tfa.optimizers.AdamW.get_slot_names": true,
+ "tfa.optimizers.AdamW.get_updates": true,
+ "tfa.optimizers.AdamW.get_weights": true,
+ "tfa.optimizers.AdamW.iterations": true,
+ "tfa.optimizers.AdamW.minimize": true,
+ "tfa.optimizers.AdamW.set_weights": true,
+ "tfa.optimizers.AdamW.variables": true,
+ "tfa.optimizers.AdamW.weights": true,
+ "tfa.optimizers.AveragedOptimizerWrapper": false,
+ "tfa.optimizers.AveragedOptimizerWrapper.__eq__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__ge__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__gt__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__init__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__le__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__lt__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__ne__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.__new__": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.add_slot": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.add_weight": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.apply_gradients": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.assign_average_vars": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.average_op": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.from_config": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_config": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_gradients": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_slot": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_slot_names": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_updates": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.get_weights": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.iterations": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.learning_rate": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.lr": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.minimize": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.set_weights": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.variables": true,
+ "tfa.optimizers.AveragedOptimizerWrapper.weights": true,
+ "tfa.optimizers.ConditionalGradient": false,
+ "tfa.optimizers.ConditionalGradient.__eq__": true,
+ "tfa.optimizers.ConditionalGradient.__ge__": true,
+ "tfa.optimizers.ConditionalGradient.__gt__": true,
+ "tfa.optimizers.ConditionalGradient.__init__": true,
+ "tfa.optimizers.ConditionalGradient.__le__": true,
+ "tfa.optimizers.ConditionalGradient.__lt__": true,
+ "tfa.optimizers.ConditionalGradient.__ne__": true,
+ "tfa.optimizers.ConditionalGradient.__new__": true,
+ "tfa.optimizers.ConditionalGradient.add_slot": true,
+ "tfa.optimizers.ConditionalGradient.add_weight": true,
+ "tfa.optimizers.ConditionalGradient.apply_gradients": true,
+ "tfa.optimizers.ConditionalGradient.from_config": true,
+ "tfa.optimizers.ConditionalGradient.get_config": true,
+ "tfa.optimizers.ConditionalGradient.get_gradients": true,
+ "tfa.optimizers.ConditionalGradient.get_slot": true,
+ "tfa.optimizers.ConditionalGradient.get_slot_names": true,
+ "tfa.optimizers.ConditionalGradient.get_updates": true,
+ "tfa.optimizers.ConditionalGradient.get_weights": true,
+ "tfa.optimizers.ConditionalGradient.iterations": true,
+ "tfa.optimizers.ConditionalGradient.minimize": true,
+ "tfa.optimizers.ConditionalGradient.set_weights": true,
+ "tfa.optimizers.ConditionalGradient.variables": true,
+ "tfa.optimizers.ConditionalGradient.weights": true,
+ "tfa.optimizers.CyclicalLearningRate": false,
+ "tfa.optimizers.CyclicalLearningRate.__call__": true,
+ "tfa.optimizers.CyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.CyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.CyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.CyclicalLearningRate.__init__": true,
+ "tfa.optimizers.CyclicalLearningRate.__le__": true,
+ "tfa.optimizers.CyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.CyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.CyclicalLearningRate.__new__": true,
+ "tfa.optimizers.CyclicalLearningRate.from_config": true,
+ "tfa.optimizers.CyclicalLearningRate.get_config": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate": false,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__call__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__init__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__le__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.__new__": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.from_config": true,
+ "tfa.optimizers.ExponentialCyclicalLearningRate.get_config": true,
+ "tfa.optimizers.LAMB": false,
+ "tfa.optimizers.LAMB.__eq__": true,
+ "tfa.optimizers.LAMB.__ge__": true,
+ "tfa.optimizers.LAMB.__gt__": true,
+ "tfa.optimizers.LAMB.__init__": true,
+ "tfa.optimizers.LAMB.__le__": true,
+ "tfa.optimizers.LAMB.__lt__": true,
+ "tfa.optimizers.LAMB.__ne__": true,
+ "tfa.optimizers.LAMB.__new__": true,
+ "tfa.optimizers.LAMB.add_slot": true,
+ "tfa.optimizers.LAMB.add_weight": true,
+ "tfa.optimizers.LAMB.apply_gradients": true,
+ "tfa.optimizers.LAMB.from_config": true,
+ "tfa.optimizers.LAMB.get_config": true,
+ "tfa.optimizers.LAMB.get_gradients": true,
+ "tfa.optimizers.LAMB.get_slot": true,
+ "tfa.optimizers.LAMB.get_slot_names": true,
+ "tfa.optimizers.LAMB.get_updates": true,
+ "tfa.optimizers.LAMB.get_weights": true,
+ "tfa.optimizers.LAMB.iterations": true,
+ "tfa.optimizers.LAMB.minimize": true,
+ "tfa.optimizers.LAMB.set_weights": true,
+ "tfa.optimizers.LAMB.variables": true,
+ "tfa.optimizers.LAMB.weights": true,
+ "tfa.optimizers.LazyAdam": false,
+ "tfa.optimizers.LazyAdam.__eq__": true,
+ "tfa.optimizers.LazyAdam.__ge__": true,
+ "tfa.optimizers.LazyAdam.__gt__": true,
+ "tfa.optimizers.LazyAdam.__init__": true,
+ "tfa.optimizers.LazyAdam.__le__": true,
+ "tfa.optimizers.LazyAdam.__lt__": true,
+ "tfa.optimizers.LazyAdam.__ne__": true,
+ "tfa.optimizers.LazyAdam.__new__": true,
+ "tfa.optimizers.LazyAdam.add_slot": true,
+ "tfa.optimizers.LazyAdam.add_weight": true,
+ "tfa.optimizers.LazyAdam.apply_gradients": true,
+ "tfa.optimizers.LazyAdam.from_config": true,
+ "tfa.optimizers.LazyAdam.get_config": true,
+ "tfa.optimizers.LazyAdam.get_gradients": true,
+ "tfa.optimizers.LazyAdam.get_slot": true,
+ "tfa.optimizers.LazyAdam.get_slot_names": true,
+ "tfa.optimizers.LazyAdam.get_updates": true,
+ "tfa.optimizers.LazyAdam.get_weights": true,
+ "tfa.optimizers.LazyAdam.iterations": true,
+ "tfa.optimizers.LazyAdam.minimize": true,
+ "tfa.optimizers.LazyAdam.set_weights": true,
+ "tfa.optimizers.LazyAdam.variables": true,
+ "tfa.optimizers.LazyAdam.weights": true,
+ "tfa.optimizers.Lookahead": false,
+ "tfa.optimizers.Lookahead.__eq__": true,
+ "tfa.optimizers.Lookahead.__ge__": true,
+ "tfa.optimizers.Lookahead.__gt__": true,
+ "tfa.optimizers.Lookahead.__init__": true,
+ "tfa.optimizers.Lookahead.__le__": true,
+ "tfa.optimizers.Lookahead.__lt__": true,
+ "tfa.optimizers.Lookahead.__ne__": true,
+ "tfa.optimizers.Lookahead.__new__": true,
+ "tfa.optimizers.Lookahead.add_slot": true,
+ "tfa.optimizers.Lookahead.add_weight": true,
+ "tfa.optimizers.Lookahead.apply_gradients": true,
+ "tfa.optimizers.Lookahead.from_config": true,
+ "tfa.optimizers.Lookahead.get_config": true,
+ "tfa.optimizers.Lookahead.get_gradients": true,
+ "tfa.optimizers.Lookahead.get_slot": true,
+ "tfa.optimizers.Lookahead.get_slot_names": true,
+ "tfa.optimizers.Lookahead.get_updates": true,
+ "tfa.optimizers.Lookahead.get_weights": true,
+ "tfa.optimizers.Lookahead.iterations": true,
+ "tfa.optimizers.Lookahead.learning_rate": true,
+ "tfa.optimizers.Lookahead.lr": true,
+ "tfa.optimizers.Lookahead.minimize": true,
+ "tfa.optimizers.Lookahead.set_weights": true,
+ "tfa.optimizers.Lookahead.variables": true,
+ "tfa.optimizers.Lookahead.weights": true,
+ "tfa.optimizers.MovingAverage": false,
+ "tfa.optimizers.MovingAverage.__eq__": true,
+ "tfa.optimizers.MovingAverage.__ge__": true,
+ "tfa.optimizers.MovingAverage.__gt__": true,
+ "tfa.optimizers.MovingAverage.__init__": true,
+ "tfa.optimizers.MovingAverage.__le__": true,
+ "tfa.optimizers.MovingAverage.__lt__": true,
+ "tfa.optimizers.MovingAverage.__ne__": true,
+ "tfa.optimizers.MovingAverage.__new__": true,
+ "tfa.optimizers.MovingAverage.add_slot": true,
+ "tfa.optimizers.MovingAverage.add_weight": true,
+ "tfa.optimizers.MovingAverage.apply_gradients": true,
+ "tfa.optimizers.MovingAverage.assign_average_vars": true,
+ "tfa.optimizers.MovingAverage.average_op": true,
+ "tfa.optimizers.MovingAverage.from_config": true,
+ "tfa.optimizers.MovingAverage.get_config": true,
+ "tfa.optimizers.MovingAverage.get_gradients": true,
+ "tfa.optimizers.MovingAverage.get_slot": true,
+ "tfa.optimizers.MovingAverage.get_slot_names": true,
+ "tfa.optimizers.MovingAverage.get_updates": true,
+ "tfa.optimizers.MovingAverage.get_weights": true,
+ "tfa.optimizers.MovingAverage.iterations": true,
+ "tfa.optimizers.MovingAverage.learning_rate": true,
+ "tfa.optimizers.MovingAverage.lr": true,
+ "tfa.optimizers.MovingAverage.minimize": true,
+ "tfa.optimizers.MovingAverage.set_weights": true,
+ "tfa.optimizers.MovingAverage.variables": true,
+ "tfa.optimizers.MovingAverage.weights": true,
+ "tfa.optimizers.RectifiedAdam": false,
+ "tfa.optimizers.RectifiedAdam.__eq__": true,
+ "tfa.optimizers.RectifiedAdam.__ge__": true,
+ "tfa.optimizers.RectifiedAdam.__gt__": true,
+ "tfa.optimizers.RectifiedAdam.__init__": true,
+ "tfa.optimizers.RectifiedAdam.__le__": true,
+ "tfa.optimizers.RectifiedAdam.__lt__": true,
+ "tfa.optimizers.RectifiedAdam.__ne__": true,
+ "tfa.optimizers.RectifiedAdam.__new__": true,
+ "tfa.optimizers.RectifiedAdam.add_slot": true,
+ "tfa.optimizers.RectifiedAdam.add_weight": true,
+ "tfa.optimizers.RectifiedAdam.apply_gradients": true,
+ "tfa.optimizers.RectifiedAdam.from_config": true,
+ "tfa.optimizers.RectifiedAdam.get_config": true,
+ "tfa.optimizers.RectifiedAdam.get_gradients": true,
+ "tfa.optimizers.RectifiedAdam.get_slot": true,
+ "tfa.optimizers.RectifiedAdam.get_slot_names": true,
+ "tfa.optimizers.RectifiedAdam.get_updates": true,
+ "tfa.optimizers.RectifiedAdam.get_weights": true,
+ "tfa.optimizers.RectifiedAdam.iterations": true,
+ "tfa.optimizers.RectifiedAdam.minimize": true,
+ "tfa.optimizers.RectifiedAdam.set_weights": true,
+ "tfa.optimizers.RectifiedAdam.variables": true,
+ "tfa.optimizers.RectifiedAdam.weights": true,
+ "tfa.optimizers.SGDW": false,
+ "tfa.optimizers.SGDW.__eq__": true,
+ "tfa.optimizers.SGDW.__ge__": true,
+ "tfa.optimizers.SGDW.__gt__": true,
+ "tfa.optimizers.SGDW.__init__": true,
+ "tfa.optimizers.SGDW.__le__": true,
+ "tfa.optimizers.SGDW.__lt__": true,
+ "tfa.optimizers.SGDW.__ne__": true,
+ "tfa.optimizers.SGDW.__new__": true,
+ "tfa.optimizers.SGDW.add_slot": true,
+ "tfa.optimizers.SGDW.add_weight": true,
+ "tfa.optimizers.SGDW.apply_gradients": true,
+ "tfa.optimizers.SGDW.from_config": true,
+ "tfa.optimizers.SGDW.get_config": true,
+ "tfa.optimizers.SGDW.get_gradients": true,
+ "tfa.optimizers.SGDW.get_slot": true,
+ "tfa.optimizers.SGDW.get_slot_names": true,
+ "tfa.optimizers.SGDW.get_updates": true,
+ "tfa.optimizers.SGDW.get_weights": true,
+ "tfa.optimizers.SGDW.iterations": true,
+ "tfa.optimizers.SGDW.minimize": true,
+ "tfa.optimizers.SGDW.set_weights": true,
+ "tfa.optimizers.SGDW.variables": true,
+ "tfa.optimizers.SGDW.weights": true,
+ "tfa.optimizers.SWA": false,
+ "tfa.optimizers.SWA.__eq__": true,
+ "tfa.optimizers.SWA.__ge__": true,
+ "tfa.optimizers.SWA.__gt__": true,
+ "tfa.optimizers.SWA.__init__": true,
+ "tfa.optimizers.SWA.__le__": true,
+ "tfa.optimizers.SWA.__lt__": true,
+ "tfa.optimizers.SWA.__ne__": true,
+ "tfa.optimizers.SWA.__new__": true,
+ "tfa.optimizers.SWA.add_slot": true,
+ "tfa.optimizers.SWA.add_weight": true,
+ "tfa.optimizers.SWA.apply_gradients": true,
+ "tfa.optimizers.SWA.assign_average_vars": true,
+ "tfa.optimizers.SWA.average_op": true,
+ "tfa.optimizers.SWA.from_config": true,
+ "tfa.optimizers.SWA.get_config": true,
+ "tfa.optimizers.SWA.get_gradients": true,
+ "tfa.optimizers.SWA.get_slot": true,
+ "tfa.optimizers.SWA.get_slot_names": true,
+ "tfa.optimizers.SWA.get_updates": true,
+ "tfa.optimizers.SWA.get_weights": true,
+ "tfa.optimizers.SWA.iterations": true,
+ "tfa.optimizers.SWA.learning_rate": true,
+ "tfa.optimizers.SWA.lr": true,
+ "tfa.optimizers.SWA.minimize": true,
+ "tfa.optimizers.SWA.set_weights": true,
+ "tfa.optimizers.SWA.variables": true,
+ "tfa.optimizers.SWA.weights": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate": false,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__call__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__init__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__le__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.__new__": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.from_config": true,
+ "tfa.optimizers.Triangular2CyclicalLearningRate.get_config": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate": false,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__call__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__init__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__le__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.__new__": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.from_config": true,
+ "tfa.optimizers.TriangularCyclicalLearningRate.get_config": true,
+ "tfa.optimizers.absolute_import": true,
+ "tfa.optimizers.average_wrapper": false,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper": false,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__eq__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__ge__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__gt__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__init__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__le__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__lt__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__ne__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.__new__": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.add_slot": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.add_weight": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.apply_gradients": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.assign_average_vars": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.average_op": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.from_config": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_config": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_gradients": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_slot": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_slot_names": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_updates": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.get_weights": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.iterations": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.learning_rate": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.lr": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.minimize": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.set_weights": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.variables": true,
+ "tfa.optimizers.average_wrapper.AveragedOptimizerWrapper.weights": true,
+ "tfa.optimizers.average_wrapper.absolute_import": true,
+ "tfa.optimizers.average_wrapper.division": true,
+ "tfa.optimizers.average_wrapper.print_function": true,
+ "tfa.optimizers.conditional_gradient": false,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient": false,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__eq__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__ge__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__gt__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__init__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__le__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__lt__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__ne__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.__new__": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.add_slot": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.add_weight": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.apply_gradients": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.from_config": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_config": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_gradients": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_slot": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_slot_names": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_updates": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.get_weights": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.iterations": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.minimize": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.set_weights": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.variables": true,
+ "tfa.optimizers.conditional_gradient.ConditionalGradient.weights": true,
+ "tfa.optimizers.conditional_gradient.absolute_import": true,
+ "tfa.optimizers.conditional_gradient.division": true,
+ "tfa.optimizers.conditional_gradient.print_function": true,
+ "tfa.optimizers.cyclical_learning_rate": false,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate": false,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__call__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__init__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__le__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.__new__": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.from_config": true,
+ "tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate.get_config": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate": false,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__call__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__init__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__le__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.__new__": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.from_config": true,
+ "tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate.get_config": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate": false,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__call__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__init__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__le__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.__new__": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.from_config": true,
+ "tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate.get_config": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate": false,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__call__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__eq__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__ge__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__gt__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__init__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__le__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__lt__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__ne__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.__new__": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.from_config": true,
+ "tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate.get_config": true,
+ "tfa.optimizers.cyclical_learning_rate.absolute_import": true,
+ "tfa.optimizers.cyclical_learning_rate.division": true,
+ "tfa.optimizers.cyclical_learning_rate.print_function": true,
+ "tfa.optimizers.division": true,
+ "tfa.optimizers.extend_with_decoupled_weight_decay": false,
+ "tfa.optimizers.lamb": false,
+ "tfa.optimizers.lamb.LAMB": false,
+ "tfa.optimizers.lamb.LAMB.__eq__": true,
+ "tfa.optimizers.lamb.LAMB.__ge__": true,
+ "tfa.optimizers.lamb.LAMB.__gt__": true,
+ "tfa.optimizers.lamb.LAMB.__init__": true,
+ "tfa.optimizers.lamb.LAMB.__le__": true,
+ "tfa.optimizers.lamb.LAMB.__lt__": true,
+ "tfa.optimizers.lamb.LAMB.__ne__": true,
+ "tfa.optimizers.lamb.LAMB.__new__": true,
+ "tfa.optimizers.lamb.LAMB.add_slot": true,
+ "tfa.optimizers.lamb.LAMB.add_weight": true,
+ "tfa.optimizers.lamb.LAMB.apply_gradients": true,
+ "tfa.optimizers.lamb.LAMB.from_config": true,
+ "tfa.optimizers.lamb.LAMB.get_config": true,
+ "tfa.optimizers.lamb.LAMB.get_gradients": true,
+ "tfa.optimizers.lamb.LAMB.get_slot": true,
+ "tfa.optimizers.lamb.LAMB.get_slot_names": true,
+ "tfa.optimizers.lamb.LAMB.get_updates": true,
+ "tfa.optimizers.lamb.LAMB.get_weights": true,
+ "tfa.optimizers.lamb.LAMB.iterations": true,
+ "tfa.optimizers.lamb.LAMB.minimize": true,
+ "tfa.optimizers.lamb.LAMB.set_weights": true,
+ "tfa.optimizers.lamb.LAMB.variables": true,
+ "tfa.optimizers.lamb.LAMB.weights": true,
+ "tfa.optimizers.lamb.absolute_import": true,
+ "tfa.optimizers.lamb.division": true,
+ "tfa.optimizers.lamb.print_function": true,
+ "tfa.optimizers.lazy_adam": false,
+ "tfa.optimizers.lazy_adam.LazyAdam": false,
+ "tfa.optimizers.lazy_adam.LazyAdam.__eq__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__ge__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__gt__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__init__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__le__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__lt__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__ne__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.__new__": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.add_slot": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.add_weight": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.apply_gradients": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.from_config": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_config": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_gradients": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_slot": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_slot_names": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_updates": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.get_weights": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.iterations": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.minimize": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.set_weights": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.variables": true,
+ "tfa.optimizers.lazy_adam.LazyAdam.weights": true,
+ "tfa.optimizers.lazy_adam.absolute_import": true,
+ "tfa.optimizers.lazy_adam.division": true,
+ "tfa.optimizers.lazy_adam.print_function": true,
+ "tfa.optimizers.lookahead": false,
+ "tfa.optimizers.lookahead.Lookahead": false,
+ "tfa.optimizers.lookahead.Lookahead.__eq__": true,
+ "tfa.optimizers.lookahead.Lookahead.__ge__": true,
+ "tfa.optimizers.lookahead.Lookahead.__gt__": true,
+ "tfa.optimizers.lookahead.Lookahead.__init__": true,
+ "tfa.optimizers.lookahead.Lookahead.__le__": true,
+ "tfa.optimizers.lookahead.Lookahead.__lt__": true,
+ "tfa.optimizers.lookahead.Lookahead.__ne__": true,
+ "tfa.optimizers.lookahead.Lookahead.__new__": true,
+ "tfa.optimizers.lookahead.Lookahead.add_slot": true,
+ "tfa.optimizers.lookahead.Lookahead.add_weight": true,
+ "tfa.optimizers.lookahead.Lookahead.apply_gradients": true,
+ "tfa.optimizers.lookahead.Lookahead.from_config": true,
+ "tfa.optimizers.lookahead.Lookahead.get_config": true,
+ "tfa.optimizers.lookahead.Lookahead.get_gradients": true,
+ "tfa.optimizers.lookahead.Lookahead.get_slot": true,
+ "tfa.optimizers.lookahead.Lookahead.get_slot_names": true,
+ "tfa.optimizers.lookahead.Lookahead.get_updates": true,
+ "tfa.optimizers.lookahead.Lookahead.get_weights": true,
+ "tfa.optimizers.lookahead.Lookahead.iterations": true,
+ "tfa.optimizers.lookahead.Lookahead.learning_rate": true,
+ "tfa.optimizers.lookahead.Lookahead.lr": true,
+ "tfa.optimizers.lookahead.Lookahead.minimize": true,
+ "tfa.optimizers.lookahead.Lookahead.set_weights": true,
+ "tfa.optimizers.lookahead.Lookahead.variables": true,
+ "tfa.optimizers.lookahead.Lookahead.weights": true,
+ "tfa.optimizers.lookahead.absolute_import": true,
+ "tfa.optimizers.lookahead.division": true,
+ "tfa.optimizers.lookahead.print_function": true,
+ "tfa.optimizers.moving_average": false,
+ "tfa.optimizers.moving_average.MovingAverage": false,
+ "tfa.optimizers.moving_average.MovingAverage.__eq__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__ge__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__gt__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__init__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__le__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__lt__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__ne__": true,
+ "tfa.optimizers.moving_average.MovingAverage.__new__": true,
+ "tfa.optimizers.moving_average.MovingAverage.add_slot": true,
+ "tfa.optimizers.moving_average.MovingAverage.add_weight": true,
+ "tfa.optimizers.moving_average.MovingAverage.apply_gradients": true,
+ "tfa.optimizers.moving_average.MovingAverage.assign_average_vars": true,
+ "tfa.optimizers.moving_average.MovingAverage.average_op": true,
+ "tfa.optimizers.moving_average.MovingAverage.from_config": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_config": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_gradients": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_slot": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_slot_names": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_updates": true,
+ "tfa.optimizers.moving_average.MovingAverage.get_weights": true,
+ "tfa.optimizers.moving_average.MovingAverage.iterations": true,
+ "tfa.optimizers.moving_average.MovingAverage.learning_rate": true,
+ "tfa.optimizers.moving_average.MovingAverage.lr": true,
+ "tfa.optimizers.moving_average.MovingAverage.minimize": true,
+ "tfa.optimizers.moving_average.MovingAverage.set_weights": true,
+ "tfa.optimizers.moving_average.MovingAverage.variables": true,
+ "tfa.optimizers.moving_average.MovingAverage.weights": true,
+ "tfa.optimizers.moving_average.absolute_import": true,
+ "tfa.optimizers.moving_average.division": true,
+ "tfa.optimizers.moving_average.print_function": true,
+ "tfa.optimizers.print_function": true,
+ "tfa.optimizers.rectified_adam": false,
+ "tfa.optimizers.rectified_adam.RectifiedAdam": false,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__eq__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__ge__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__gt__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__init__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__le__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__lt__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__ne__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.__new__": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.add_slot": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.add_weight": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.apply_gradients": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.from_config": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_config": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_gradients": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_slot": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_slot_names": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_updates": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.get_weights": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.iterations": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.minimize": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.set_weights": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.variables": true,
+ "tfa.optimizers.rectified_adam.RectifiedAdam.weights": true,
+ "tfa.optimizers.rectified_adam.absolute_import": true,
+ "tfa.optimizers.rectified_adam.division": true,
+ "tfa.optimizers.rectified_adam.print_function": true,
+ "tfa.optimizers.stochastic_weight_averaging": false,
+ "tfa.optimizers.stochastic_weight_averaging.SWA": false,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__eq__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__ge__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__gt__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__init__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__le__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__lt__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__ne__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.__new__": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.add_slot": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.add_weight": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.apply_gradients": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.assign_average_vars": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.average_op": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.from_config": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_config": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_gradients": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_slot": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_slot_names": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_updates": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.get_weights": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.iterations": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.learning_rate": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.lr": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.minimize": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.set_weights": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.variables": true,
+ "tfa.optimizers.stochastic_weight_averaging.SWA.weights": true,
+ "tfa.optimizers.stochastic_weight_averaging.absolute_import": true,
+ "tfa.optimizers.stochastic_weight_averaging.division": true,
+ "tfa.optimizers.stochastic_weight_averaging.print_function": true,
+ "tfa.optimizers.weight_decay_optimizers": false,
+ "tfa.optimizers.weight_decay_optimizers.AdamW": false,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__eq__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__ge__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__gt__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__init__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__le__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__lt__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__ne__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.__new__": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.add_slot": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.add_weight": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.apply_gradients": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.from_config": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_config": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_gradients": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_slot": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_slot_names": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_updates": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.get_weights": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.iterations": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.minimize": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.set_weights": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.variables": true,
+ "tfa.optimizers.weight_decay_optimizers.AdamW.weights": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension": false,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__eq__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__ge__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__gt__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__init__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__le__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__lt__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__ne__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.__new__": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.apply_gradients": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.get_config": true,
+ "tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension.minimize": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW": false,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__eq__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__ge__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__gt__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__init__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__le__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__lt__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__ne__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.__new__": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.add_slot": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.add_weight": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.apply_gradients": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.from_config": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_config": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_gradients": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_slot": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_slot_names": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_updates": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.get_weights": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.iterations": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.minimize": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.set_weights": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.variables": true,
+ "tfa.optimizers.weight_decay_optimizers.SGDW.weights": true,
+ "tfa.optimizers.weight_decay_optimizers.absolute_import": true,
+ "tfa.optimizers.weight_decay_optimizers.division": true,
+ "tfa.optimizers.weight_decay_optimizers.extend_with_decoupled_weight_decay": false,
+ "tfa.optimizers.weight_decay_optimizers.print_function": true,
+ "tfa.rnn": false,
+ "tfa.rnn.LayerNormLSTMCell": false,
+ "tfa.rnn.LayerNormLSTMCell.__call__": true,
+ "tfa.rnn.LayerNormLSTMCell.__eq__": true,
+ "tfa.rnn.LayerNormLSTMCell.__ge__": true,
+ "tfa.rnn.LayerNormLSTMCell.__gt__": true,
+ "tfa.rnn.LayerNormLSTMCell.__init__": true,
+ "tfa.rnn.LayerNormLSTMCell.__le__": true,
+ "tfa.rnn.LayerNormLSTMCell.__lt__": true,
+ "tfa.rnn.LayerNormLSTMCell.__ne__": true,
+ "tfa.rnn.LayerNormLSTMCell.__new__": true,
+ "tfa.rnn.LayerNormLSTMCell.activity_regularizer": true,
+ "tfa.rnn.LayerNormLSTMCell.add_loss": true,
+ "tfa.rnn.LayerNormLSTMCell.add_metric": true,
+ "tfa.rnn.LayerNormLSTMCell.add_update": true,
+ "tfa.rnn.LayerNormLSTMCell.add_weight": true,
+ "tfa.rnn.LayerNormLSTMCell.build": true,
+ "tfa.rnn.LayerNormLSTMCell.call": true,
+ "tfa.rnn.LayerNormLSTMCell.compute_mask": true,
+ "tfa.rnn.LayerNormLSTMCell.compute_output_shape": true,
+ "tfa.rnn.LayerNormLSTMCell.compute_output_signature": true,
+ "tfa.rnn.LayerNormLSTMCell.count_params": true,
+ "tfa.rnn.LayerNormLSTMCell.dtype": true,
+ "tfa.rnn.LayerNormLSTMCell.dynamic": true,
+ "tfa.rnn.LayerNormLSTMCell.from_config": true,
+ "tfa.rnn.LayerNormLSTMCell.get_config": true,
+ "tfa.rnn.LayerNormLSTMCell.get_dropout_mask_for_cell": true,
+ "tfa.rnn.LayerNormLSTMCell.get_initial_state": true,
+ "tfa.rnn.LayerNormLSTMCell.get_input_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_input_mask_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_input_shape_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_losses_for": true,
+ "tfa.rnn.LayerNormLSTMCell.get_output_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_output_mask_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_output_shape_at": true,
+ "tfa.rnn.LayerNormLSTMCell.get_recurrent_dropout_mask_for_cell": true,
+ "tfa.rnn.LayerNormLSTMCell.get_updates_for": true,
+ "tfa.rnn.LayerNormLSTMCell.get_weights": true,
+ "tfa.rnn.LayerNormLSTMCell.input": true,
+ "tfa.rnn.LayerNormLSTMCell.input_mask": true,
+ "tfa.rnn.LayerNormLSTMCell.input_shape": true,
+ "tfa.rnn.LayerNormLSTMCell.input_spec": true,
+ "tfa.rnn.LayerNormLSTMCell.losses": true,
+ "tfa.rnn.LayerNormLSTMCell.metrics": true,
+ "tfa.rnn.LayerNormLSTMCell.name": true,
+ "tfa.rnn.LayerNormLSTMCell.name_scope": true,
+ "tfa.rnn.LayerNormLSTMCell.non_trainable_variables": true,
+ "tfa.rnn.LayerNormLSTMCell.non_trainable_weights": true,
+ "tfa.rnn.LayerNormLSTMCell.output": true,
+ "tfa.rnn.LayerNormLSTMCell.output_mask": true,
+ "tfa.rnn.LayerNormLSTMCell.output_shape": true,
+ "tfa.rnn.LayerNormLSTMCell.reset_dropout_mask": true,
+ "tfa.rnn.LayerNormLSTMCell.reset_recurrent_dropout_mask": true,
+ "tfa.rnn.LayerNormLSTMCell.set_weights": true,
+ "tfa.rnn.LayerNormLSTMCell.submodules": true,
+ "tfa.rnn.LayerNormLSTMCell.trainable": true,
+ "tfa.rnn.LayerNormLSTMCell.trainable_variables": true,
+ "tfa.rnn.LayerNormLSTMCell.trainable_weights": true,
+ "tfa.rnn.LayerNormLSTMCell.updates": true,
+ "tfa.rnn.LayerNormLSTMCell.variables": true,
+ "tfa.rnn.LayerNormLSTMCell.weights": true,
+ "tfa.rnn.LayerNormLSTMCell.with_name_scope": true,
+ "tfa.rnn.NASCell": false,
+ "tfa.rnn.NASCell.__call__": true,
+ "tfa.rnn.NASCell.__eq__": true,
+ "tfa.rnn.NASCell.__ge__": true,
+ "tfa.rnn.NASCell.__gt__": true,
+ "tfa.rnn.NASCell.__init__": true,
+ "tfa.rnn.NASCell.__le__": true,
+ "tfa.rnn.NASCell.__lt__": true,
+ "tfa.rnn.NASCell.__ne__": true,
+ "tfa.rnn.NASCell.__new__": true,
+ "tfa.rnn.NASCell.activity_regularizer": true,
+ "tfa.rnn.NASCell.add_loss": true,
+ "tfa.rnn.NASCell.add_metric": true,
+ "tfa.rnn.NASCell.add_update": true,
+ "tfa.rnn.NASCell.add_weight": true,
+ "tfa.rnn.NASCell.build": true,
+ "tfa.rnn.NASCell.call": true,
+ "tfa.rnn.NASCell.compute_mask": true,
+ "tfa.rnn.NASCell.compute_output_shape": true,
+ "tfa.rnn.NASCell.compute_output_signature": true,
+ "tfa.rnn.NASCell.count_params": true,
+ "tfa.rnn.NASCell.dtype": true,
+ "tfa.rnn.NASCell.dynamic": true,
+ "tfa.rnn.NASCell.from_config": true,
+ "tfa.rnn.NASCell.get_config": true,
+ "tfa.rnn.NASCell.get_initial_state": true,
+ "tfa.rnn.NASCell.get_input_at": true,
+ "tfa.rnn.NASCell.get_input_mask_at": true,
+ "tfa.rnn.NASCell.get_input_shape_at": true,
+ "tfa.rnn.NASCell.get_losses_for": true,
+ "tfa.rnn.NASCell.get_output_at": true,
+ "tfa.rnn.NASCell.get_output_mask_at": true,
+ "tfa.rnn.NASCell.get_output_shape_at": true,
+ "tfa.rnn.NASCell.get_updates_for": true,
+ "tfa.rnn.NASCell.get_weights": true,
+ "tfa.rnn.NASCell.input": true,
+ "tfa.rnn.NASCell.input_mask": true,
+ "tfa.rnn.NASCell.input_shape": true,
+ "tfa.rnn.NASCell.input_spec": true,
+ "tfa.rnn.NASCell.losses": true,
+ "tfa.rnn.NASCell.metrics": true,
+ "tfa.rnn.NASCell.name": true,
+ "tfa.rnn.NASCell.name_scope": true,
+ "tfa.rnn.NASCell.non_trainable_variables": true,
+ "tfa.rnn.NASCell.non_trainable_weights": true,
+ "tfa.rnn.NASCell.output": true,
+ "tfa.rnn.NASCell.output_mask": true,
+ "tfa.rnn.NASCell.output_shape": true,
+ "tfa.rnn.NASCell.output_size": true,
+ "tfa.rnn.NASCell.set_weights": true,
+ "tfa.rnn.NASCell.state_size": true,
+ "tfa.rnn.NASCell.submodules": true,
+ "tfa.rnn.NASCell.trainable": true,
+ "tfa.rnn.NASCell.trainable_variables": true,
+ "tfa.rnn.NASCell.trainable_weights": true,
+ "tfa.rnn.NASCell.updates": true,
+ "tfa.rnn.NASCell.variables": true,
+ "tfa.rnn.NASCell.weights": true,
+ "tfa.rnn.NASCell.with_name_scope": true,
+ "tfa.rnn.absolute_import": true,
+ "tfa.rnn.cell": false,
+ "tfa.rnn.cell.LayerNormLSTMCell": false,
+ "tfa.rnn.cell.LayerNormLSTMCell.__call__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__eq__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__ge__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__gt__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__init__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__le__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__lt__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__ne__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.__new__": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.activity_regularizer": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.add_loss": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.add_metric": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.add_update": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.add_weight": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.build": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.call": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_mask": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_output_shape": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.compute_output_signature": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.count_params": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.dtype": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.dynamic": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.from_config": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_config": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_dropout_mask_for_cell": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_initial_state": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_mask_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_input_shape_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_losses_for": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_mask_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_output_shape_at": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_recurrent_dropout_mask_for_cell": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_updates_for": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.get_weights": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.input": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.input_mask": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.input_shape": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.input_spec": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.losses": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.metrics": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.name": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.name_scope": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.non_trainable_variables": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.non_trainable_weights": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.output": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.output_mask": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.output_shape": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.reset_dropout_mask": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.reset_recurrent_dropout_mask": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.set_weights": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.submodules": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable_variables": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.trainable_weights": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.updates": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.variables": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.weights": true,
+ "tfa.rnn.cell.LayerNormLSTMCell.with_name_scope": true,
+ "tfa.rnn.cell.NASCell": false,
+ "tfa.rnn.cell.NASCell.__call__": true,
+ "tfa.rnn.cell.NASCell.__eq__": true,
+ "tfa.rnn.cell.NASCell.__ge__": true,
+ "tfa.rnn.cell.NASCell.__gt__": true,
+ "tfa.rnn.cell.NASCell.__init__": true,
+ "tfa.rnn.cell.NASCell.__le__": true,
+ "tfa.rnn.cell.NASCell.__lt__": true,
+ "tfa.rnn.cell.NASCell.__ne__": true,
+ "tfa.rnn.cell.NASCell.__new__": true,
+ "tfa.rnn.cell.NASCell.activity_regularizer": true,
+ "tfa.rnn.cell.NASCell.add_loss": true,
+ "tfa.rnn.cell.NASCell.add_metric": true,
+ "tfa.rnn.cell.NASCell.add_update": true,
+ "tfa.rnn.cell.NASCell.add_weight": true,
+ "tfa.rnn.cell.NASCell.build": true,
+ "tfa.rnn.cell.NASCell.call": true,
+ "tfa.rnn.cell.NASCell.compute_mask": true,
+ "tfa.rnn.cell.NASCell.compute_output_shape": true,
+ "tfa.rnn.cell.NASCell.compute_output_signature": true,
+ "tfa.rnn.cell.NASCell.count_params": true,
+ "tfa.rnn.cell.NASCell.dtype": true,
+ "tfa.rnn.cell.NASCell.dynamic": true,
+ "tfa.rnn.cell.NASCell.from_config": true,
+ "tfa.rnn.cell.NASCell.get_config": true,
+ "tfa.rnn.cell.NASCell.get_initial_state": true,
+ "tfa.rnn.cell.NASCell.get_input_at": true,
+ "tfa.rnn.cell.NASCell.get_input_mask_at": true,
+ "tfa.rnn.cell.NASCell.get_input_shape_at": true,
+ "tfa.rnn.cell.NASCell.get_losses_for": true,
+ "tfa.rnn.cell.NASCell.get_output_at": true,
+ "tfa.rnn.cell.NASCell.get_output_mask_at": true,
+ "tfa.rnn.cell.NASCell.get_output_shape_at": true,
+ "tfa.rnn.cell.NASCell.get_updates_for": true,
+ "tfa.rnn.cell.NASCell.get_weights": true,
+ "tfa.rnn.cell.NASCell.input": true,
+ "tfa.rnn.cell.NASCell.input_mask": true,
+ "tfa.rnn.cell.NASCell.input_shape": true,
+ "tfa.rnn.cell.NASCell.input_spec": true,
+ "tfa.rnn.cell.NASCell.losses": true,
+ "tfa.rnn.cell.NASCell.metrics": true,
+ "tfa.rnn.cell.NASCell.name": true,
+ "tfa.rnn.cell.NASCell.name_scope": true,
+ "tfa.rnn.cell.NASCell.non_trainable_variables": true,
+ "tfa.rnn.cell.NASCell.non_trainable_weights": true,
+ "tfa.rnn.cell.NASCell.output": true,
+ "tfa.rnn.cell.NASCell.output_mask": true,
+ "tfa.rnn.cell.NASCell.output_shape": true,
+ "tfa.rnn.cell.NASCell.output_size": true,
+ "tfa.rnn.cell.NASCell.set_weights": true,
+ "tfa.rnn.cell.NASCell.state_size": true,
+ "tfa.rnn.cell.NASCell.submodules": true,
+ "tfa.rnn.cell.NASCell.trainable": true,
+ "tfa.rnn.cell.NASCell.trainable_variables": true,
+ "tfa.rnn.cell.NASCell.trainable_weights": true,
+ "tfa.rnn.cell.NASCell.updates": true,
+ "tfa.rnn.cell.NASCell.variables": true,
+ "tfa.rnn.cell.NASCell.weights": true,
+ "tfa.rnn.cell.NASCell.with_name_scope": true,
+ "tfa.rnn.cell.absolute_import": true,
+ "tfa.rnn.cell.division": true,
+ "tfa.rnn.cell.print_function": true,
+ "tfa.rnn.division": true,
+ "tfa.rnn.print_function": true,
+ "tfa.seq2seq": false,
+ "tfa.seq2seq.AttentionMechanism": false,
+ "tfa.seq2seq.AttentionMechanism.__eq__": true,
+ "tfa.seq2seq.AttentionMechanism.__ge__": true,
+ "tfa.seq2seq.AttentionMechanism.__gt__": true,
+ "tfa.seq2seq.AttentionMechanism.__init__": true,
+ "tfa.seq2seq.AttentionMechanism.__le__": true,
+ "tfa.seq2seq.AttentionMechanism.__lt__": true,
+ "tfa.seq2seq.AttentionMechanism.__ne__": true,
+ "tfa.seq2seq.AttentionMechanism.__new__": true,
+ "tfa.seq2seq.AttentionMechanism.alignments_size": true,
+ "tfa.seq2seq.AttentionMechanism.state_size": true,
+ "tfa.seq2seq.AttentionWrapper": false,
+ "tfa.seq2seq.AttentionWrapper.__call__": true,
+ "tfa.seq2seq.AttentionWrapper.__eq__": true,
+ "tfa.seq2seq.AttentionWrapper.__ge__": true,
+ "tfa.seq2seq.AttentionWrapper.__gt__": true,
+ "tfa.seq2seq.AttentionWrapper.__init__": true,
+ "tfa.seq2seq.AttentionWrapper.__le__": true,
+ "tfa.seq2seq.AttentionWrapper.__lt__": true,
+ "tfa.seq2seq.AttentionWrapper.__ne__": true,
+ "tfa.seq2seq.AttentionWrapper.__new__": true,
+ "tfa.seq2seq.AttentionWrapper.activity_regularizer": true,
+ "tfa.seq2seq.AttentionWrapper.add_loss": true,
+ "tfa.seq2seq.AttentionWrapper.add_metric": true,
+ "tfa.seq2seq.AttentionWrapper.add_update": true,
+ "tfa.seq2seq.AttentionWrapper.add_weight": true,
+ "tfa.seq2seq.AttentionWrapper.build": true,
+ "tfa.seq2seq.AttentionWrapper.call": true,
+ "tfa.seq2seq.AttentionWrapper.compute_mask": true,
+ "tfa.seq2seq.AttentionWrapper.compute_output_shape": true,
+ "tfa.seq2seq.AttentionWrapper.compute_output_signature": true,
+ "tfa.seq2seq.AttentionWrapper.count_params": true,
+ "tfa.seq2seq.AttentionWrapper.dtype": true,
+ "tfa.seq2seq.AttentionWrapper.dynamic": true,
+ "tfa.seq2seq.AttentionWrapper.from_config": true,
+ "tfa.seq2seq.AttentionWrapper.get_config": true,
+ "tfa.seq2seq.AttentionWrapper.get_initial_state": true,
+ "tfa.seq2seq.AttentionWrapper.get_input_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_input_mask_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_input_shape_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_losses_for": true,
+ "tfa.seq2seq.AttentionWrapper.get_output_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_output_mask_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_output_shape_at": true,
+ "tfa.seq2seq.AttentionWrapper.get_updates_for": true,
+ "tfa.seq2seq.AttentionWrapper.get_weights": true,
+ "tfa.seq2seq.AttentionWrapper.input": true,
+ "tfa.seq2seq.AttentionWrapper.input_mask": true,
+ "tfa.seq2seq.AttentionWrapper.input_shape": true,
+ "tfa.seq2seq.AttentionWrapper.input_spec": true,
+ "tfa.seq2seq.AttentionWrapper.losses": true,
+ "tfa.seq2seq.AttentionWrapper.metrics": true,
+ "tfa.seq2seq.AttentionWrapper.name": true,
+ "tfa.seq2seq.AttentionWrapper.name_scope": true,
+ "tfa.seq2seq.AttentionWrapper.non_trainable_variables": true,
+ "tfa.seq2seq.AttentionWrapper.non_trainable_weights": true,
+ "tfa.seq2seq.AttentionWrapper.output": true,
+ "tfa.seq2seq.AttentionWrapper.output_mask": true,
+ "tfa.seq2seq.AttentionWrapper.output_shape": true,
+ "tfa.seq2seq.AttentionWrapper.output_size": true,
+ "tfa.seq2seq.AttentionWrapper.set_weights": true,
+ "tfa.seq2seq.AttentionWrapper.state_size": true,
+ "tfa.seq2seq.AttentionWrapper.submodules": true,
+ "tfa.seq2seq.AttentionWrapper.trainable": true,
+ "tfa.seq2seq.AttentionWrapper.trainable_variables": true,
+ "tfa.seq2seq.AttentionWrapper.trainable_weights": true,
+ "tfa.seq2seq.AttentionWrapper.updates": true,
+ "tfa.seq2seq.AttentionWrapper.variables": true,
+ "tfa.seq2seq.AttentionWrapper.weights": true,
+ "tfa.seq2seq.AttentionWrapper.with_name_scope": true,
+ "tfa.seq2seq.AttentionWrapperState": false,
+ "tfa.seq2seq.AttentionWrapperState.__add__": true,
+ "tfa.seq2seq.AttentionWrapperState.__contains__": true,
+ "tfa.seq2seq.AttentionWrapperState.__eq__": true,
+ "tfa.seq2seq.AttentionWrapperState.__ge__": true,
+ "tfa.seq2seq.AttentionWrapperState.__getitem__": true,
+ "tfa.seq2seq.AttentionWrapperState.__gt__": true,
+ "tfa.seq2seq.AttentionWrapperState.__init__": true,
+ "tfa.seq2seq.AttentionWrapperState.__iter__": true,
+ "tfa.seq2seq.AttentionWrapperState.__le__": true,
+ "tfa.seq2seq.AttentionWrapperState.__len__": true,
+ "tfa.seq2seq.AttentionWrapperState.__lt__": true,
+ "tfa.seq2seq.AttentionWrapperState.__mul__": true,
+ "tfa.seq2seq.AttentionWrapperState.__ne__": true,
+ "tfa.seq2seq.AttentionWrapperState.__new__": true,
+ "tfa.seq2seq.AttentionWrapperState.__rmul__": true,
+ "tfa.seq2seq.AttentionWrapperState.alignment_history": true,
+ "tfa.seq2seq.AttentionWrapperState.alignments": true,
+ "tfa.seq2seq.AttentionWrapperState.attention": true,
+ "tfa.seq2seq.AttentionWrapperState.attention_state": true,
+ "tfa.seq2seq.AttentionWrapperState.cell_state": true,
+ "tfa.seq2seq.AttentionWrapperState.clone": true,
+ "tfa.seq2seq.AttentionWrapperState.count": true,
+ "tfa.seq2seq.AttentionWrapperState.index": true,
+ "tfa.seq2seq.AttentionWrapperState.time": true,
+ "tfa.seq2seq.BahdanauAttention": false,
+ "tfa.seq2seq.BahdanauAttention.__call__": true,
+ "tfa.seq2seq.BahdanauAttention.__eq__": true,
+ "tfa.seq2seq.BahdanauAttention.__ge__": true,
+ "tfa.seq2seq.BahdanauAttention.__gt__": true,
+ "tfa.seq2seq.BahdanauAttention.__init__": true,
+ "tfa.seq2seq.BahdanauAttention.__le__": true,
+ "tfa.seq2seq.BahdanauAttention.__lt__": true,
+ "tfa.seq2seq.BahdanauAttention.__ne__": true,
+ "tfa.seq2seq.BahdanauAttention.__new__": true,
+ "tfa.seq2seq.BahdanauAttention.activity_regularizer": true,
+ "tfa.seq2seq.BahdanauAttention.add_loss": true,
+ "tfa.seq2seq.BahdanauAttention.add_metric": true,
+ "tfa.seq2seq.BahdanauAttention.add_update": true,
+ "tfa.seq2seq.BahdanauAttention.add_weight": true,
+ "tfa.seq2seq.BahdanauAttention.alignments_size": true,
+ "tfa.seq2seq.BahdanauAttention.build": true,
+ "tfa.seq2seq.BahdanauAttention.call": true,
+ "tfa.seq2seq.BahdanauAttention.compute_mask": true,
+ "tfa.seq2seq.BahdanauAttention.compute_output_shape": true,
+ "tfa.seq2seq.BahdanauAttention.compute_output_signature": true,
+ "tfa.seq2seq.BahdanauAttention.count_params": true,
+ "tfa.seq2seq.BahdanauAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.BahdanauAttention.dtype": true,
+ "tfa.seq2seq.BahdanauAttention.dynamic": true,
+ "tfa.seq2seq.BahdanauAttention.from_config": true,
+ "tfa.seq2seq.BahdanauAttention.get_config": true,
+ "tfa.seq2seq.BahdanauAttention.get_input_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_input_mask_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_input_shape_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_losses_for": true,
+ "tfa.seq2seq.BahdanauAttention.get_output_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_output_mask_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_output_shape_at": true,
+ "tfa.seq2seq.BahdanauAttention.get_updates_for": true,
+ "tfa.seq2seq.BahdanauAttention.get_weights": true,
+ "tfa.seq2seq.BahdanauAttention.initial_alignments": true,
+ "tfa.seq2seq.BahdanauAttention.initial_state": true,
+ "tfa.seq2seq.BahdanauAttention.input": true,
+ "tfa.seq2seq.BahdanauAttention.input_mask": true,
+ "tfa.seq2seq.BahdanauAttention.input_shape": true,
+ "tfa.seq2seq.BahdanauAttention.input_spec": true,
+ "tfa.seq2seq.BahdanauAttention.losses": true,
+ "tfa.seq2seq.BahdanauAttention.memory_initialized": true,
+ "tfa.seq2seq.BahdanauAttention.metrics": true,
+ "tfa.seq2seq.BahdanauAttention.name": true,
+ "tfa.seq2seq.BahdanauAttention.name_scope": true,
+ "tfa.seq2seq.BahdanauAttention.non_trainable_variables": true,
+ "tfa.seq2seq.BahdanauAttention.non_trainable_weights": true,
+ "tfa.seq2seq.BahdanauAttention.output": true,
+ "tfa.seq2seq.BahdanauAttention.output_mask": true,
+ "tfa.seq2seq.BahdanauAttention.output_shape": true,
+ "tfa.seq2seq.BahdanauAttention.set_weights": true,
+ "tfa.seq2seq.BahdanauAttention.setup_memory": true,
+ "tfa.seq2seq.BahdanauAttention.state_size": true,
+ "tfa.seq2seq.BahdanauAttention.submodules": true,
+ "tfa.seq2seq.BahdanauAttention.trainable": true,
+ "tfa.seq2seq.BahdanauAttention.trainable_variables": true,
+ "tfa.seq2seq.BahdanauAttention.trainable_weights": true,
+ "tfa.seq2seq.BahdanauAttention.updates": true,
+ "tfa.seq2seq.BahdanauAttention.variables": true,
+ "tfa.seq2seq.BahdanauAttention.weights": true,
+ "tfa.seq2seq.BahdanauAttention.with_name_scope": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention": false,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__call__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__eq__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__ge__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__gt__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__init__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__le__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__lt__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__ne__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.__new__": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.activity_regularizer": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_loss": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_metric": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_update": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.add_weight": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.alignments_size": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.build": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.call": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_mask": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_output_shape": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.compute_output_signature": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.count_params": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.dtype": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.dynamic": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.from_config": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_config": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_mask_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_input_shape_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_losses_for": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_mask_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_output_shape_at": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_updates_for": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.get_weights": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.initial_alignments": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.initial_state": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.input": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_mask": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_shape": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.input_spec": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.losses": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.memory_initialized": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.metrics": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.name": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.name_scope": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.non_trainable_variables": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.non_trainable_weights": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.output": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.output_mask": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.output_shape": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.set_weights": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.setup_memory": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.state_size": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.submodules": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable_variables": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.trainable_weights": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.updates": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.variables": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.weights": true,
+ "tfa.seq2seq.BahdanauMonotonicAttention.with_name_scope": true,
+ "tfa.seq2seq.BaseDecoder": false,
+ "tfa.seq2seq.BaseDecoder.__call__": true,
+ "tfa.seq2seq.BaseDecoder.__eq__": true,
+ "tfa.seq2seq.BaseDecoder.__ge__": true,
+ "tfa.seq2seq.BaseDecoder.__gt__": true,
+ "tfa.seq2seq.BaseDecoder.__init__": true,
+ "tfa.seq2seq.BaseDecoder.__le__": true,
+ "tfa.seq2seq.BaseDecoder.__lt__": true,
+ "tfa.seq2seq.BaseDecoder.__ne__": true,
+ "tfa.seq2seq.BaseDecoder.__new__": true,
+ "tfa.seq2seq.BaseDecoder.activity_regularizer": true,
+ "tfa.seq2seq.BaseDecoder.add_loss": true,
+ "tfa.seq2seq.BaseDecoder.add_metric": true,
+ "tfa.seq2seq.BaseDecoder.add_update": true,
+ "tfa.seq2seq.BaseDecoder.add_weight": true,
+ "tfa.seq2seq.BaseDecoder.batch_size": true,
+ "tfa.seq2seq.BaseDecoder.build": true,
+ "tfa.seq2seq.BaseDecoder.call": true,
+ "tfa.seq2seq.BaseDecoder.compute_mask": true,
+ "tfa.seq2seq.BaseDecoder.compute_output_shape": true,
+ "tfa.seq2seq.BaseDecoder.compute_output_signature": true,
+ "tfa.seq2seq.BaseDecoder.count_params": true,
+ "tfa.seq2seq.BaseDecoder.dtype": true,
+ "tfa.seq2seq.BaseDecoder.dynamic": true,
+ "tfa.seq2seq.BaseDecoder.finalize": true,
+ "tfa.seq2seq.BaseDecoder.from_config": true,
+ "tfa.seq2seq.BaseDecoder.get_config": true,
+ "tfa.seq2seq.BaseDecoder.get_input_at": true,
+ "tfa.seq2seq.BaseDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.BaseDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.BaseDecoder.get_losses_for": true,
+ "tfa.seq2seq.BaseDecoder.get_output_at": true,
+ "tfa.seq2seq.BaseDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.BaseDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.BaseDecoder.get_updates_for": true,
+ "tfa.seq2seq.BaseDecoder.get_weights": true,
+ "tfa.seq2seq.BaseDecoder.initialize": true,
+ "tfa.seq2seq.BaseDecoder.input": true,
+ "tfa.seq2seq.BaseDecoder.input_mask": true,
+ "tfa.seq2seq.BaseDecoder.input_shape": true,
+ "tfa.seq2seq.BaseDecoder.input_spec": true,
+ "tfa.seq2seq.BaseDecoder.losses": true,
+ "tfa.seq2seq.BaseDecoder.metrics": true,
+ "tfa.seq2seq.BaseDecoder.name": true,
+ "tfa.seq2seq.BaseDecoder.name_scope": true,
+ "tfa.seq2seq.BaseDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.BaseDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.BaseDecoder.output": true,
+ "tfa.seq2seq.BaseDecoder.output_dtype": true,
+ "tfa.seq2seq.BaseDecoder.output_mask": true,
+ "tfa.seq2seq.BaseDecoder.output_shape": true,
+ "tfa.seq2seq.BaseDecoder.output_size": true,
+ "tfa.seq2seq.BaseDecoder.set_weights": true,
+ "tfa.seq2seq.BaseDecoder.step": true,
+ "tfa.seq2seq.BaseDecoder.submodules": true,
+ "tfa.seq2seq.BaseDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.BaseDecoder.trainable": true,
+ "tfa.seq2seq.BaseDecoder.trainable_variables": true,
+ "tfa.seq2seq.BaseDecoder.trainable_weights": true,
+ "tfa.seq2seq.BaseDecoder.updates": true,
+ "tfa.seq2seq.BaseDecoder.variables": true,
+ "tfa.seq2seq.BaseDecoder.weights": true,
+ "tfa.seq2seq.BaseDecoder.with_name_scope": true,
+ "tfa.seq2seq.BasicDecoder": false,
+ "tfa.seq2seq.BasicDecoder.__call__": true,
+ "tfa.seq2seq.BasicDecoder.__eq__": true,
+ "tfa.seq2seq.BasicDecoder.__ge__": true,
+ "tfa.seq2seq.BasicDecoder.__gt__": true,
+ "tfa.seq2seq.BasicDecoder.__init__": true,
+ "tfa.seq2seq.BasicDecoder.__le__": true,
+ "tfa.seq2seq.BasicDecoder.__lt__": true,
+ "tfa.seq2seq.BasicDecoder.__ne__": true,
+ "tfa.seq2seq.BasicDecoder.__new__": true,
+ "tfa.seq2seq.BasicDecoder.activity_regularizer": true,
+ "tfa.seq2seq.BasicDecoder.add_loss": true,
+ "tfa.seq2seq.BasicDecoder.add_metric": true,
+ "tfa.seq2seq.BasicDecoder.add_update": true,
+ "tfa.seq2seq.BasicDecoder.add_weight": true,
+ "tfa.seq2seq.BasicDecoder.batch_size": true,
+ "tfa.seq2seq.BasicDecoder.build": true,
+ "tfa.seq2seq.BasicDecoder.call": true,
+ "tfa.seq2seq.BasicDecoder.compute_mask": true,
+ "tfa.seq2seq.BasicDecoder.compute_output_shape": true,
+ "tfa.seq2seq.BasicDecoder.compute_output_signature": true,
+ "tfa.seq2seq.BasicDecoder.count_params": true,
+ "tfa.seq2seq.BasicDecoder.dtype": true,
+ "tfa.seq2seq.BasicDecoder.dynamic": true,
+ "tfa.seq2seq.BasicDecoder.finalize": true,
+ "tfa.seq2seq.BasicDecoder.from_config": true,
+ "tfa.seq2seq.BasicDecoder.get_config": true,
+ "tfa.seq2seq.BasicDecoder.get_input_at": true,
+ "tfa.seq2seq.BasicDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.BasicDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.BasicDecoder.get_losses_for": true,
+ "tfa.seq2seq.BasicDecoder.get_output_at": true,
+ "tfa.seq2seq.BasicDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.BasicDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.BasicDecoder.get_updates_for": true,
+ "tfa.seq2seq.BasicDecoder.get_weights": true,
+ "tfa.seq2seq.BasicDecoder.initialize": true,
+ "tfa.seq2seq.BasicDecoder.input": true,
+ "tfa.seq2seq.BasicDecoder.input_mask": true,
+ "tfa.seq2seq.BasicDecoder.input_shape": true,
+ "tfa.seq2seq.BasicDecoder.input_spec": true,
+ "tfa.seq2seq.BasicDecoder.losses": true,
+ "tfa.seq2seq.BasicDecoder.metrics": true,
+ "tfa.seq2seq.BasicDecoder.name": true,
+ "tfa.seq2seq.BasicDecoder.name_scope": true,
+ "tfa.seq2seq.BasicDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.BasicDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.BasicDecoder.output": true,
+ "tfa.seq2seq.BasicDecoder.output_dtype": true,
+ "tfa.seq2seq.BasicDecoder.output_mask": true,
+ "tfa.seq2seq.BasicDecoder.output_shape": true,
+ "tfa.seq2seq.BasicDecoder.output_size": true,
+ "tfa.seq2seq.BasicDecoder.set_weights": true,
+ "tfa.seq2seq.BasicDecoder.step": true,
+ "tfa.seq2seq.BasicDecoder.submodules": true,
+ "tfa.seq2seq.BasicDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.BasicDecoder.trainable": true,
+ "tfa.seq2seq.BasicDecoder.trainable_variables": true,
+ "tfa.seq2seq.BasicDecoder.trainable_weights": true,
+ "tfa.seq2seq.BasicDecoder.updates": true,
+ "tfa.seq2seq.BasicDecoder.variables": true,
+ "tfa.seq2seq.BasicDecoder.weights": true,
+ "tfa.seq2seq.BasicDecoder.with_name_scope": true,
+ "tfa.seq2seq.BasicDecoderOutput": false,
+ "tfa.seq2seq.BasicDecoderOutput.__add__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__contains__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__eq__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__ge__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__gt__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__init__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__iter__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__le__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__len__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__lt__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__mul__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__ne__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__new__": true,
+ "tfa.seq2seq.BasicDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.BasicDecoderOutput.count": true,
+ "tfa.seq2seq.BasicDecoderOutput.index": true,
+ "tfa.seq2seq.BasicDecoderOutput.rnn_output": true,
+ "tfa.seq2seq.BasicDecoderOutput.sample_id": true,
+ "tfa.seq2seq.BeamSearchDecoder": false,
+ "tfa.seq2seq.BeamSearchDecoder.__call__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__eq__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__ge__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__gt__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__init__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__le__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__lt__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__ne__": true,
+ "tfa.seq2seq.BeamSearchDecoder.__new__": true,
+ "tfa.seq2seq.BeamSearchDecoder.activity_regularizer": true,
+ "tfa.seq2seq.BeamSearchDecoder.add_loss": true,
+ "tfa.seq2seq.BeamSearchDecoder.add_metric": true,
+ "tfa.seq2seq.BeamSearchDecoder.add_update": true,
+ "tfa.seq2seq.BeamSearchDecoder.add_weight": true,
+ "tfa.seq2seq.BeamSearchDecoder.batch_size": true,
+ "tfa.seq2seq.BeamSearchDecoder.build": true,
+ "tfa.seq2seq.BeamSearchDecoder.call": true,
+ "tfa.seq2seq.BeamSearchDecoder.compute_mask": true,
+ "tfa.seq2seq.BeamSearchDecoder.compute_output_shape": true,
+ "tfa.seq2seq.BeamSearchDecoder.compute_output_signature": true,
+ "tfa.seq2seq.BeamSearchDecoder.count_params": true,
+ "tfa.seq2seq.BeamSearchDecoder.dtype": true,
+ "tfa.seq2seq.BeamSearchDecoder.dynamic": true,
+ "tfa.seq2seq.BeamSearchDecoder.finalize": true,
+ "tfa.seq2seq.BeamSearchDecoder.from_config": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_config": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_input_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_losses_for": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_output_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_updates_for": true,
+ "tfa.seq2seq.BeamSearchDecoder.get_weights": true,
+ "tfa.seq2seq.BeamSearchDecoder.initialize": true,
+ "tfa.seq2seq.BeamSearchDecoder.input": true,
+ "tfa.seq2seq.BeamSearchDecoder.input_mask": true,
+ "tfa.seq2seq.BeamSearchDecoder.input_shape": true,
+ "tfa.seq2seq.BeamSearchDecoder.input_spec": true,
+ "tfa.seq2seq.BeamSearchDecoder.losses": true,
+ "tfa.seq2seq.BeamSearchDecoder.metrics": true,
+ "tfa.seq2seq.BeamSearchDecoder.name": true,
+ "tfa.seq2seq.BeamSearchDecoder.name_scope": true,
+ "tfa.seq2seq.BeamSearchDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.BeamSearchDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.BeamSearchDecoder.output": true,
+ "tfa.seq2seq.BeamSearchDecoder.output_dtype": true,
+ "tfa.seq2seq.BeamSearchDecoder.output_mask": true,
+ "tfa.seq2seq.BeamSearchDecoder.output_shape": true,
+ "tfa.seq2seq.BeamSearchDecoder.output_size": true,
+ "tfa.seq2seq.BeamSearchDecoder.set_weights": true,
+ "tfa.seq2seq.BeamSearchDecoder.step": true,
+ "tfa.seq2seq.BeamSearchDecoder.submodules": true,
+ "tfa.seq2seq.BeamSearchDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.BeamSearchDecoder.trainable": true,
+ "tfa.seq2seq.BeamSearchDecoder.trainable_variables": true,
+ "tfa.seq2seq.BeamSearchDecoder.trainable_weights": true,
+ "tfa.seq2seq.BeamSearchDecoder.updates": true,
+ "tfa.seq2seq.BeamSearchDecoder.variables": true,
+ "tfa.seq2seq.BeamSearchDecoder.weights": true,
+ "tfa.seq2seq.BeamSearchDecoder.with_name_scope": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput": false,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__add__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__contains__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__eq__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__ge__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__gt__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__init__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__iter__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__le__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__len__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__lt__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__mul__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__ne__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__new__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.count": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.index": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.parent_ids": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.predicted_ids": true,
+ "tfa.seq2seq.BeamSearchDecoderOutput.scores": true,
+ "tfa.seq2seq.BeamSearchDecoderState": false,
+ "tfa.seq2seq.BeamSearchDecoderState.__add__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__contains__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__eq__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__ge__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__getitem__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__gt__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__init__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__iter__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__le__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__len__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__lt__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__mul__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__ne__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__new__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.__rmul__": true,
+ "tfa.seq2seq.BeamSearchDecoderState.accumulated_attention_probs": true,
+ "tfa.seq2seq.BeamSearchDecoderState.cell_state": true,
+ "tfa.seq2seq.BeamSearchDecoderState.count": true,
+ "tfa.seq2seq.BeamSearchDecoderState.finished": true,
+ "tfa.seq2seq.BeamSearchDecoderState.index": true,
+ "tfa.seq2seq.BeamSearchDecoderState.lengths": true,
+ "tfa.seq2seq.BeamSearchDecoderState.log_probs": true,
+ "tfa.seq2seq.CustomSampler": false,
+ "tfa.seq2seq.CustomSampler.__eq__": true,
+ "tfa.seq2seq.CustomSampler.__ge__": true,
+ "tfa.seq2seq.CustomSampler.__gt__": true,
+ "tfa.seq2seq.CustomSampler.__init__": true,
+ "tfa.seq2seq.CustomSampler.__le__": true,
+ "tfa.seq2seq.CustomSampler.__lt__": true,
+ "tfa.seq2seq.CustomSampler.__ne__": true,
+ "tfa.seq2seq.CustomSampler.__new__": true,
+ "tfa.seq2seq.CustomSampler.batch_size": true,
+ "tfa.seq2seq.CustomSampler.initialize": true,
+ "tfa.seq2seq.CustomSampler.next_inputs": true,
+ "tfa.seq2seq.CustomSampler.sample": true,
+ "tfa.seq2seq.CustomSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.CustomSampler.sample_ids_shape": true,
+ "tfa.seq2seq.Decoder": false,
+ "tfa.seq2seq.Decoder.__eq__": true,
+ "tfa.seq2seq.Decoder.__ge__": true,
+ "tfa.seq2seq.Decoder.__gt__": true,
+ "tfa.seq2seq.Decoder.__init__": true,
+ "tfa.seq2seq.Decoder.__le__": true,
+ "tfa.seq2seq.Decoder.__lt__": true,
+ "tfa.seq2seq.Decoder.__ne__": true,
+ "tfa.seq2seq.Decoder.__new__": true,
+ "tfa.seq2seq.Decoder.batch_size": true,
+ "tfa.seq2seq.Decoder.finalize": true,
+ "tfa.seq2seq.Decoder.initialize": true,
+ "tfa.seq2seq.Decoder.output_dtype": true,
+ "tfa.seq2seq.Decoder.output_size": true,
+ "tfa.seq2seq.Decoder.step": true,
+ "tfa.seq2seq.Decoder.tracks_own_finished": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput": false,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__add__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__contains__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__eq__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__ge__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__gt__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__init__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__iter__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__le__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__len__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__lt__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__mul__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__ne__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__new__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.beam_search_decoder_output": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.count": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.index": true,
+ "tfa.seq2seq.FinalBeamSearchDecoderOutput.predicted_ids": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler": false,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__eq__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__ge__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__gt__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__init__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__le__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__lt__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__ne__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.__new__": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.batch_size": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.initialize": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.next_inputs": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.sample": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.GreedyEmbeddingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.InferenceSampler": false,
+ "tfa.seq2seq.InferenceSampler.__eq__": true,
+ "tfa.seq2seq.InferenceSampler.__ge__": true,
+ "tfa.seq2seq.InferenceSampler.__gt__": true,
+ "tfa.seq2seq.InferenceSampler.__init__": true,
+ "tfa.seq2seq.InferenceSampler.__le__": true,
+ "tfa.seq2seq.InferenceSampler.__lt__": true,
+ "tfa.seq2seq.InferenceSampler.__ne__": true,
+ "tfa.seq2seq.InferenceSampler.__new__": true,
+ "tfa.seq2seq.InferenceSampler.batch_size": true,
+ "tfa.seq2seq.InferenceSampler.initialize": true,
+ "tfa.seq2seq.InferenceSampler.next_inputs": true,
+ "tfa.seq2seq.InferenceSampler.sample": true,
+ "tfa.seq2seq.InferenceSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.InferenceSampler.sample_ids_shape": true,
+ "tfa.seq2seq.LuongAttention": false,
+ "tfa.seq2seq.LuongAttention.__call__": true,
+ "tfa.seq2seq.LuongAttention.__eq__": true,
+ "tfa.seq2seq.LuongAttention.__ge__": true,
+ "tfa.seq2seq.LuongAttention.__gt__": true,
+ "tfa.seq2seq.LuongAttention.__init__": true,
+ "tfa.seq2seq.LuongAttention.__le__": true,
+ "tfa.seq2seq.LuongAttention.__lt__": true,
+ "tfa.seq2seq.LuongAttention.__ne__": true,
+ "tfa.seq2seq.LuongAttention.__new__": true,
+ "tfa.seq2seq.LuongAttention.activity_regularizer": true,
+ "tfa.seq2seq.LuongAttention.add_loss": true,
+ "tfa.seq2seq.LuongAttention.add_metric": true,
+ "tfa.seq2seq.LuongAttention.add_update": true,
+ "tfa.seq2seq.LuongAttention.add_weight": true,
+ "tfa.seq2seq.LuongAttention.alignments_size": true,
+ "tfa.seq2seq.LuongAttention.build": true,
+ "tfa.seq2seq.LuongAttention.call": true,
+ "tfa.seq2seq.LuongAttention.compute_mask": true,
+ "tfa.seq2seq.LuongAttention.compute_output_shape": true,
+ "tfa.seq2seq.LuongAttention.compute_output_signature": true,
+ "tfa.seq2seq.LuongAttention.count_params": true,
+ "tfa.seq2seq.LuongAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.LuongAttention.dtype": true,
+ "tfa.seq2seq.LuongAttention.dynamic": true,
+ "tfa.seq2seq.LuongAttention.from_config": true,
+ "tfa.seq2seq.LuongAttention.get_config": true,
+ "tfa.seq2seq.LuongAttention.get_input_at": true,
+ "tfa.seq2seq.LuongAttention.get_input_mask_at": true,
+ "tfa.seq2seq.LuongAttention.get_input_shape_at": true,
+ "tfa.seq2seq.LuongAttention.get_losses_for": true,
+ "tfa.seq2seq.LuongAttention.get_output_at": true,
+ "tfa.seq2seq.LuongAttention.get_output_mask_at": true,
+ "tfa.seq2seq.LuongAttention.get_output_shape_at": true,
+ "tfa.seq2seq.LuongAttention.get_updates_for": true,
+ "tfa.seq2seq.LuongAttention.get_weights": true,
+ "tfa.seq2seq.LuongAttention.initial_alignments": true,
+ "tfa.seq2seq.LuongAttention.initial_state": true,
+ "tfa.seq2seq.LuongAttention.input": true,
+ "tfa.seq2seq.LuongAttention.input_mask": true,
+ "tfa.seq2seq.LuongAttention.input_shape": true,
+ "tfa.seq2seq.LuongAttention.input_spec": true,
+ "tfa.seq2seq.LuongAttention.losses": true,
+ "tfa.seq2seq.LuongAttention.memory_initialized": true,
+ "tfa.seq2seq.LuongAttention.metrics": true,
+ "tfa.seq2seq.LuongAttention.name": true,
+ "tfa.seq2seq.LuongAttention.name_scope": true,
+ "tfa.seq2seq.LuongAttention.non_trainable_variables": true,
+ "tfa.seq2seq.LuongAttention.non_trainable_weights": true,
+ "tfa.seq2seq.LuongAttention.output": true,
+ "tfa.seq2seq.LuongAttention.output_mask": true,
+ "tfa.seq2seq.LuongAttention.output_shape": true,
+ "tfa.seq2seq.LuongAttention.set_weights": true,
+ "tfa.seq2seq.LuongAttention.setup_memory": true,
+ "tfa.seq2seq.LuongAttention.state_size": true,
+ "tfa.seq2seq.LuongAttention.submodules": true,
+ "tfa.seq2seq.LuongAttention.trainable": true,
+ "tfa.seq2seq.LuongAttention.trainable_variables": true,
+ "tfa.seq2seq.LuongAttention.trainable_weights": true,
+ "tfa.seq2seq.LuongAttention.updates": true,
+ "tfa.seq2seq.LuongAttention.variables": true,
+ "tfa.seq2seq.LuongAttention.weights": true,
+ "tfa.seq2seq.LuongAttention.with_name_scope": true,
+ "tfa.seq2seq.LuongMonotonicAttention": false,
+ "tfa.seq2seq.LuongMonotonicAttention.__call__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__eq__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__ge__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__gt__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__init__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__le__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__lt__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__ne__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.__new__": true,
+ "tfa.seq2seq.LuongMonotonicAttention.activity_regularizer": true,
+ "tfa.seq2seq.LuongMonotonicAttention.add_loss": true,
+ "tfa.seq2seq.LuongMonotonicAttention.add_metric": true,
+ "tfa.seq2seq.LuongMonotonicAttention.add_update": true,
+ "tfa.seq2seq.LuongMonotonicAttention.add_weight": true,
+ "tfa.seq2seq.LuongMonotonicAttention.alignments_size": true,
+ "tfa.seq2seq.LuongMonotonicAttention.build": true,
+ "tfa.seq2seq.LuongMonotonicAttention.call": true,
+ "tfa.seq2seq.LuongMonotonicAttention.compute_mask": true,
+ "tfa.seq2seq.LuongMonotonicAttention.compute_output_shape": true,
+ "tfa.seq2seq.LuongMonotonicAttention.compute_output_signature": true,
+ "tfa.seq2seq.LuongMonotonicAttention.count_params": true,
+ "tfa.seq2seq.LuongMonotonicAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.LuongMonotonicAttention.dtype": true,
+ "tfa.seq2seq.LuongMonotonicAttention.dynamic": true,
+ "tfa.seq2seq.LuongMonotonicAttention.from_config": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_config": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_mask_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_input_shape_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_losses_for": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_mask_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_output_shape_at": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_updates_for": true,
+ "tfa.seq2seq.LuongMonotonicAttention.get_weights": true,
+ "tfa.seq2seq.LuongMonotonicAttention.initial_alignments": true,
+ "tfa.seq2seq.LuongMonotonicAttention.initial_state": true,
+ "tfa.seq2seq.LuongMonotonicAttention.input": true,
+ "tfa.seq2seq.LuongMonotonicAttention.input_mask": true,
+ "tfa.seq2seq.LuongMonotonicAttention.input_shape": true,
+ "tfa.seq2seq.LuongMonotonicAttention.input_spec": true,
+ "tfa.seq2seq.LuongMonotonicAttention.losses": true,
+ "tfa.seq2seq.LuongMonotonicAttention.memory_initialized": true,
+ "tfa.seq2seq.LuongMonotonicAttention.metrics": true,
+ "tfa.seq2seq.LuongMonotonicAttention.name": true,
+ "tfa.seq2seq.LuongMonotonicAttention.name_scope": true,
+ "tfa.seq2seq.LuongMonotonicAttention.non_trainable_variables": true,
+ "tfa.seq2seq.LuongMonotonicAttention.non_trainable_weights": true,
+ "tfa.seq2seq.LuongMonotonicAttention.output": true,
+ "tfa.seq2seq.LuongMonotonicAttention.output_mask": true,
+ "tfa.seq2seq.LuongMonotonicAttention.output_shape": true,
+ "tfa.seq2seq.LuongMonotonicAttention.set_weights": true,
+ "tfa.seq2seq.LuongMonotonicAttention.setup_memory": true,
+ "tfa.seq2seq.LuongMonotonicAttention.state_size": true,
+ "tfa.seq2seq.LuongMonotonicAttention.submodules": true,
+ "tfa.seq2seq.LuongMonotonicAttention.trainable": true,
+ "tfa.seq2seq.LuongMonotonicAttention.trainable_variables": true,
+ "tfa.seq2seq.LuongMonotonicAttention.trainable_weights": true,
+ "tfa.seq2seq.LuongMonotonicAttention.updates": true,
+ "tfa.seq2seq.LuongMonotonicAttention.variables": true,
+ "tfa.seq2seq.LuongMonotonicAttention.weights": true,
+ "tfa.seq2seq.LuongMonotonicAttention.with_name_scope": true,
+ "tfa.seq2seq.SampleEmbeddingSampler": false,
+ "tfa.seq2seq.SampleEmbeddingSampler.__eq__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__ge__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__gt__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__init__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__le__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__lt__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__ne__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.__new__": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.batch_size": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.initialize": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.next_inputs": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.sample": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.SampleEmbeddingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.Sampler": false,
+ "tfa.seq2seq.Sampler.__eq__": true,
+ "tfa.seq2seq.Sampler.__ge__": true,
+ "tfa.seq2seq.Sampler.__gt__": true,
+ "tfa.seq2seq.Sampler.__init__": true,
+ "tfa.seq2seq.Sampler.__le__": true,
+ "tfa.seq2seq.Sampler.__lt__": true,
+ "tfa.seq2seq.Sampler.__ne__": true,
+ "tfa.seq2seq.Sampler.__new__": true,
+ "tfa.seq2seq.Sampler.batch_size": true,
+ "tfa.seq2seq.Sampler.initialize": true,
+ "tfa.seq2seq.Sampler.next_inputs": true,
+ "tfa.seq2seq.Sampler.sample": true,
+ "tfa.seq2seq.Sampler.sample_ids_dtype": true,
+ "tfa.seq2seq.Sampler.sample_ids_shape": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler": false,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__eq__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__ge__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__gt__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__init__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__le__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__lt__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__ne__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.__new__": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.batch_size": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.initialize": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.next_inputs": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.ScheduledEmbeddingTrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler": false,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__eq__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__ge__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__gt__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__init__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__le__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__lt__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__ne__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.__new__": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.batch_size": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.initialize": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.next_inputs": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.sample": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.ScheduledOutputTrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.SequenceLoss": false,
+ "tfa.seq2seq.SequenceLoss.__call__": true,
+ "tfa.seq2seq.SequenceLoss.__eq__": true,
+ "tfa.seq2seq.SequenceLoss.__ge__": true,
+ "tfa.seq2seq.SequenceLoss.__gt__": true,
+ "tfa.seq2seq.SequenceLoss.__init__": true,
+ "tfa.seq2seq.SequenceLoss.__le__": true,
+ "tfa.seq2seq.SequenceLoss.__lt__": true,
+ "tfa.seq2seq.SequenceLoss.__ne__": true,
+ "tfa.seq2seq.SequenceLoss.__new__": true,
+ "tfa.seq2seq.SequenceLoss.call": true,
+ "tfa.seq2seq.SequenceLoss.from_config": true,
+ "tfa.seq2seq.SequenceLoss.get_config": true,
+ "tfa.seq2seq.TrainingSampler": false,
+ "tfa.seq2seq.TrainingSampler.__eq__": true,
+ "tfa.seq2seq.TrainingSampler.__ge__": true,
+ "tfa.seq2seq.TrainingSampler.__gt__": true,
+ "tfa.seq2seq.TrainingSampler.__init__": true,
+ "tfa.seq2seq.TrainingSampler.__le__": true,
+ "tfa.seq2seq.TrainingSampler.__lt__": true,
+ "tfa.seq2seq.TrainingSampler.__ne__": true,
+ "tfa.seq2seq.TrainingSampler.__new__": true,
+ "tfa.seq2seq.TrainingSampler.batch_size": true,
+ "tfa.seq2seq.TrainingSampler.initialize": true,
+ "tfa.seq2seq.TrainingSampler.next_inputs": true,
+ "tfa.seq2seq.TrainingSampler.sample": true,
+ "tfa.seq2seq.TrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.TrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.absolute_import": true,
+ "tfa.seq2seq.attention_wrapper": false,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism": false,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__init__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__le__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.__new__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.alignments_size": true,
+ "tfa.seq2seq.attention_wrapper.AttentionMechanism.state_size": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper": false,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__call__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__init__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__le__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.__new__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.activity_regularizer": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_loss": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_metric": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_update": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.add_weight": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.build": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.call": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_mask": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_output_shape": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.compute_output_signature": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.count_params": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.dtype": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.dynamic": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.from_config": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_config": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_initial_state": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_input_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_losses_for": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_output_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_updates_for": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.get_weights": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_mask": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_shape": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.input_spec": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.losses": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.metrics": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.name": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.name_scope": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.non_trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.non_trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_mask": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_shape": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.output_size": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.set_weights": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.state_size": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.submodules": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.updates": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.variables": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.weights": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapper.with_name_scope": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState": false,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__add__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__contains__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__getitem__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__init__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__iter__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__le__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__len__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__mul__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__new__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.__rmul__": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.alignment_history": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.alignments": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.attention": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.attention_state": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.cell_state": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.clone": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.count": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.index": true,
+ "tfa.seq2seq.attention_wrapper.AttentionWrapperState.time": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention": false,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__call__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__init__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__le__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.__new__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.activity_regularizer": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_loss": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_metric": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_update": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.add_weight": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.alignments_size": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.build": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.call": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_output_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.compute_output_signature": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.count_params": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.dtype": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.dynamic": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.from_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_input_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_losses_for": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_output_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_updates_for": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.get_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.initial_alignments": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.initial_state": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.input_spec": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.losses": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.memory_initialized": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.metrics": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.name": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.name_scope": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.non_trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.non_trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.output_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.set_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.setup_memory": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.state_size": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.submodules": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.updates": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauAttention.with_name_scope": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention": false,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__call__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__init__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__le__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.__new__": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.activity_regularizer": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_loss": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_metric": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_update": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.add_weight": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.alignments_size": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.build": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.call": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_output_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.compute_output_signature": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.count_params": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.dtype": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.dynamic": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.from_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_config": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_input_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_losses_for": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_output_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_updates_for": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.get_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.initial_alignments": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.initial_state": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.input_spec": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.losses": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.memory_initialized": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.metrics": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.name": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.name_scope": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.non_trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.non_trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output_mask": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.output_shape": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.set_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.setup_memory": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.state_size": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.submodules": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.updates": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.variables": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.weights": true,
+ "tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention.with_name_scope": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention": false,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__call__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__init__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__le__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.__new__": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.activity_regularizer": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_loss": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_metric": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_update": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.add_weight": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.alignments_size": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.build": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.call": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_output_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.compute_output_signature": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.count_params": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.dtype": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.dynamic": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.from_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_input_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_losses_for": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_output_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_updates_for": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.get_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.initial_alignments": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.initial_state": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.input_spec": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.losses": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.memory_initialized": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.metrics": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.name": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.name_scope": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.non_trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.non_trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.output_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.set_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.setup_memory": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.state_size": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.submodules": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.updates": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongAttention.with_name_scope": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention": false,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__call__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__eq__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__ge__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__gt__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__init__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__le__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__lt__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__ne__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.__new__": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.activity_regularizer": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_loss": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_metric": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_update": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.add_weight": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.alignments_size": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.build": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.call": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_output_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.compute_output_signature": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.count_params": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.deserialize_inner_layer_from_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.dtype": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.dynamic": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.from_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_config": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_input_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_losses_for": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_mask_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_output_shape_at": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_updates_for": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.get_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.initial_alignments": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.initial_state": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.input_spec": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.losses": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.memory_initialized": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.metrics": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.name": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.name_scope": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.non_trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.non_trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output_mask": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.output_shape": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.set_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.setup_memory": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.state_size": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.submodules": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable_variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.trainable_weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.updates": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.variables": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.weights": true,
+ "tfa.seq2seq.attention_wrapper.LuongMonotonicAttention.with_name_scope": true,
+ "tfa.seq2seq.attention_wrapper.absolute_import": true,
+ "tfa.seq2seq.attention_wrapper.division": true,
+ "tfa.seq2seq.attention_wrapper.hardmax": false,
+ "tfa.seq2seq.attention_wrapper.monotonic_attention": false,
+ "tfa.seq2seq.attention_wrapper.print_function": true,
+ "tfa.seq2seq.attention_wrapper.safe_cumprod": false,
+ "tfa.seq2seq.basic_decoder": false,
+ "tfa.seq2seq.basic_decoder.BasicDecoder": false,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__call__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__eq__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__ge__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__gt__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__init__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__le__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__lt__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__ne__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.__new__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.activity_regularizer": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_loss": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_metric": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_update": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.add_weight": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.batch_size": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.build": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.call": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_mask": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_output_shape": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.compute_output_signature": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.count_params": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.dtype": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.dynamic": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.finalize": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.from_config": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_config": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_losses_for": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_updates_for": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.get_weights": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.initialize": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_mask": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_shape": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.input_spec": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.losses": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.metrics": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.name": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.name_scope": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_dtype": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_mask": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_shape": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.output_size": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.set_weights": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.step": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.submodules": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable_variables": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.trainable_weights": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.updates": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.variables": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.weights": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoder.with_name_scope": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput": false,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__add__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__contains__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__eq__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__ge__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__gt__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__init__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__iter__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__le__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__len__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__lt__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__mul__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__ne__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__new__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.count": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.index": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.rnn_output": true,
+ "tfa.seq2seq.basic_decoder.BasicDecoderOutput.sample_id": true,
+ "tfa.seq2seq.basic_decoder.absolute_import": true,
+ "tfa.seq2seq.basic_decoder.division": true,
+ "tfa.seq2seq.basic_decoder.print_function": true,
+ "tfa.seq2seq.beam_search_decoder": false,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder": false,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__call__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__eq__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__ge__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__gt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__init__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__le__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__lt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__ne__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.__new__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.activity_regularizer": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_loss": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_metric": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_update": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.add_weight": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.batch_size": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.build": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.call": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_mask": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_output_shape": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.compute_output_signature": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.count_params": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.dtype": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.dynamic": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.finalize": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.from_config": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_config": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_losses_for": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_updates_for": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.get_weights": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.initialize": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_mask": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_shape": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.input_spec": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.losses": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.metrics": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.name": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.name_scope": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_dtype": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_mask": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_shape": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.output_size": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.set_weights": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.step": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.submodules": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable_variables": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.trainable_weights": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.updates": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.variables": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.weights": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoder.with_name_scope": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin": false,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__eq__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__ge__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__gt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__init__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__le__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__lt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__ne__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.__new__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.batch_size": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.finalize": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.output_size": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.step": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin.tracks_own_finished": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput": false,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__add__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__contains__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__eq__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__ge__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__gt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__init__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__iter__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__le__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__len__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__lt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__mul__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__ne__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__new__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.count": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.index": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.parent_ids": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.predicted_ids": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput.scores": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState": false,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__add__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__contains__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__eq__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__ge__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__getitem__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__gt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__init__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__iter__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__le__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__len__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__lt__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__mul__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__ne__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__new__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.__rmul__": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.accumulated_attention_probs": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.cell_state": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.count": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.finished": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.index": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.lengths": true,
+ "tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState.log_probs": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput": false,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__add__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__contains__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__eq__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__ge__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__getitem__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__gt__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__init__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__iter__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__le__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__len__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__lt__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__mul__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__ne__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__new__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.__rmul__": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.beam_search_decoder_output": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.count": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.index": true,
+ "tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput.predicted_ids": true,
+ "tfa.seq2seq.beam_search_decoder.absolute_import": true,
+ "tfa.seq2seq.beam_search_decoder.attention_probs_from_attn_state": false,
+ "tfa.seq2seq.beam_search_decoder.division": true,
+ "tfa.seq2seq.beam_search_decoder.gather_tree_from_array": false,
+ "tfa.seq2seq.beam_search_decoder.get_attention_probs": false,
+ "tfa.seq2seq.beam_search_decoder.print_function": true,
+ "tfa.seq2seq.beam_search_decoder.tile_batch": false,
+ "tfa.seq2seq.decoder": false,
+ "tfa.seq2seq.decoder.BaseDecoder": false,
+ "tfa.seq2seq.decoder.BaseDecoder.__call__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__eq__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__ge__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__gt__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__init__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__le__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__lt__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__ne__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.__new__": true,
+ "tfa.seq2seq.decoder.BaseDecoder.activity_regularizer": true,
+ "tfa.seq2seq.decoder.BaseDecoder.add_loss": true,
+ "tfa.seq2seq.decoder.BaseDecoder.add_metric": true,
+ "tfa.seq2seq.decoder.BaseDecoder.add_update": true,
+ "tfa.seq2seq.decoder.BaseDecoder.add_weight": true,
+ "tfa.seq2seq.decoder.BaseDecoder.batch_size": true,
+ "tfa.seq2seq.decoder.BaseDecoder.build": true,
+ "tfa.seq2seq.decoder.BaseDecoder.call": true,
+ "tfa.seq2seq.decoder.BaseDecoder.compute_mask": true,
+ "tfa.seq2seq.decoder.BaseDecoder.compute_output_shape": true,
+ "tfa.seq2seq.decoder.BaseDecoder.compute_output_signature": true,
+ "tfa.seq2seq.decoder.BaseDecoder.count_params": true,
+ "tfa.seq2seq.decoder.BaseDecoder.dtype": true,
+ "tfa.seq2seq.decoder.BaseDecoder.dynamic": true,
+ "tfa.seq2seq.decoder.BaseDecoder.finalize": true,
+ "tfa.seq2seq.decoder.BaseDecoder.from_config": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_config": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_mask_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_input_shape_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_losses_for": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_mask_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_output_shape_at": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_updates_for": true,
+ "tfa.seq2seq.decoder.BaseDecoder.get_weights": true,
+ "tfa.seq2seq.decoder.BaseDecoder.initialize": true,
+ "tfa.seq2seq.decoder.BaseDecoder.input": true,
+ "tfa.seq2seq.decoder.BaseDecoder.input_mask": true,
+ "tfa.seq2seq.decoder.BaseDecoder.input_shape": true,
+ "tfa.seq2seq.decoder.BaseDecoder.input_spec": true,
+ "tfa.seq2seq.decoder.BaseDecoder.losses": true,
+ "tfa.seq2seq.decoder.BaseDecoder.metrics": true,
+ "tfa.seq2seq.decoder.BaseDecoder.name": true,
+ "tfa.seq2seq.decoder.BaseDecoder.name_scope": true,
+ "tfa.seq2seq.decoder.BaseDecoder.non_trainable_variables": true,
+ "tfa.seq2seq.decoder.BaseDecoder.non_trainable_weights": true,
+ "tfa.seq2seq.decoder.BaseDecoder.output": true,
+ "tfa.seq2seq.decoder.BaseDecoder.output_dtype": true,
+ "tfa.seq2seq.decoder.BaseDecoder.output_mask": true,
+ "tfa.seq2seq.decoder.BaseDecoder.output_shape": true,
+ "tfa.seq2seq.decoder.BaseDecoder.output_size": true,
+ "tfa.seq2seq.decoder.BaseDecoder.set_weights": true,
+ "tfa.seq2seq.decoder.BaseDecoder.step": true,
+ "tfa.seq2seq.decoder.BaseDecoder.submodules": true,
+ "tfa.seq2seq.decoder.BaseDecoder.tracks_own_finished": true,
+ "tfa.seq2seq.decoder.BaseDecoder.trainable": true,
+ "tfa.seq2seq.decoder.BaseDecoder.trainable_variables": true,
+ "tfa.seq2seq.decoder.BaseDecoder.trainable_weights": true,
+ "tfa.seq2seq.decoder.BaseDecoder.updates": true,
+ "tfa.seq2seq.decoder.BaseDecoder.variables": true,
+ "tfa.seq2seq.decoder.BaseDecoder.weights": true,
+ "tfa.seq2seq.decoder.BaseDecoder.with_name_scope": true,
+ "tfa.seq2seq.decoder.Decoder": false,
+ "tfa.seq2seq.decoder.Decoder.__eq__": true,
+ "tfa.seq2seq.decoder.Decoder.__ge__": true,
+ "tfa.seq2seq.decoder.Decoder.__gt__": true,
+ "tfa.seq2seq.decoder.Decoder.__init__": true,
+ "tfa.seq2seq.decoder.Decoder.__le__": true,
+ "tfa.seq2seq.decoder.Decoder.__lt__": true,
+ "tfa.seq2seq.decoder.Decoder.__ne__": true,
+ "tfa.seq2seq.decoder.Decoder.__new__": true,
+ "tfa.seq2seq.decoder.Decoder.batch_size": true,
+ "tfa.seq2seq.decoder.Decoder.finalize": true,
+ "tfa.seq2seq.decoder.Decoder.initialize": true,
+ "tfa.seq2seq.decoder.Decoder.output_dtype": true,
+ "tfa.seq2seq.decoder.Decoder.output_size": true,
+ "tfa.seq2seq.decoder.Decoder.step": true,
+ "tfa.seq2seq.decoder.Decoder.tracks_own_finished": true,
+ "tfa.seq2seq.decoder.absolute_import": true,
+ "tfa.seq2seq.decoder.division": true,
+ "tfa.seq2seq.decoder.dynamic_decode": false,
+ "tfa.seq2seq.decoder.print_function": true,
+ "tfa.seq2seq.division": true,
+ "tfa.seq2seq.dynamic_decode": false,
+ "tfa.seq2seq.gather_tree_from_array": false,
+ "tfa.seq2seq.hardmax": false,
+ "tfa.seq2seq.loss": false,
+ "tfa.seq2seq.loss.SequenceLoss": false,
+ "tfa.seq2seq.loss.SequenceLoss.__call__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__eq__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__ge__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__gt__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__init__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__le__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__lt__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__ne__": true,
+ "tfa.seq2seq.loss.SequenceLoss.__new__": true,
+ "tfa.seq2seq.loss.SequenceLoss.call": true,
+ "tfa.seq2seq.loss.SequenceLoss.from_config": true,
+ "tfa.seq2seq.loss.SequenceLoss.get_config": true,
+ "tfa.seq2seq.loss.absolute_import": true,
+ "tfa.seq2seq.loss.division": true,
+ "tfa.seq2seq.loss.print_function": true,
+ "tfa.seq2seq.loss.sequence_loss": false,
+ "tfa.seq2seq.monotonic_attention": false,
+ "tfa.seq2seq.print_function": true,
+ "tfa.seq2seq.safe_cumprod": false,
+ "tfa.seq2seq.sampler": false,
+ "tfa.seq2seq.sampler.CustomSampler": false,
+ "tfa.seq2seq.sampler.CustomSampler.__eq__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__ge__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__gt__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__init__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__le__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__lt__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__ne__": true,
+ "tfa.seq2seq.sampler.CustomSampler.__new__": true,
+ "tfa.seq2seq.sampler.CustomSampler.batch_size": true,
+ "tfa.seq2seq.sampler.CustomSampler.initialize": true,
+ "tfa.seq2seq.sampler.CustomSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.CustomSampler.sample": true,
+ "tfa.seq2seq.sampler.CustomSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.CustomSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler": false,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__eq__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__ge__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__gt__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__init__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__le__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__lt__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__ne__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.__new__": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.batch_size": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.initialize": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.GreedyEmbeddingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.InferenceSampler": false,
+ "tfa.seq2seq.sampler.InferenceSampler.__eq__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__ge__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__gt__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__init__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__le__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__lt__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__ne__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.__new__": true,
+ "tfa.seq2seq.sampler.InferenceSampler.batch_size": true,
+ "tfa.seq2seq.sampler.InferenceSampler.initialize": true,
+ "tfa.seq2seq.sampler.InferenceSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.InferenceSampler.sample": true,
+ "tfa.seq2seq.sampler.InferenceSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.InferenceSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler": false,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__eq__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__ge__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__gt__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__init__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__le__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__lt__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__ne__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.__new__": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.batch_size": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.initialize": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.SampleEmbeddingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.Sampler": false,
+ "tfa.seq2seq.sampler.Sampler.__eq__": true,
+ "tfa.seq2seq.sampler.Sampler.__ge__": true,
+ "tfa.seq2seq.sampler.Sampler.__gt__": true,
+ "tfa.seq2seq.sampler.Sampler.__init__": true,
+ "tfa.seq2seq.sampler.Sampler.__le__": true,
+ "tfa.seq2seq.sampler.Sampler.__lt__": true,
+ "tfa.seq2seq.sampler.Sampler.__ne__": true,
+ "tfa.seq2seq.sampler.Sampler.__new__": true,
+ "tfa.seq2seq.sampler.Sampler.batch_size": true,
+ "tfa.seq2seq.sampler.Sampler.initialize": true,
+ "tfa.seq2seq.sampler.Sampler.next_inputs": true,
+ "tfa.seq2seq.sampler.Sampler.sample": true,
+ "tfa.seq2seq.sampler.Sampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.Sampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler": false,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__eq__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__ge__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__gt__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__init__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__le__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__lt__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__ne__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.__new__": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.batch_size": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.initialize": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler": false,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__eq__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__ge__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__gt__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__init__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__le__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__lt__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__ne__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.__new__": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.batch_size": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.initialize": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.ScheduledOutputTrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.TrainingSampler": false,
+ "tfa.seq2seq.sampler.TrainingSampler.__eq__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__ge__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__gt__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__init__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__le__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__lt__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__ne__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.__new__": true,
+ "tfa.seq2seq.sampler.TrainingSampler.batch_size": true,
+ "tfa.seq2seq.sampler.TrainingSampler.initialize": true,
+ "tfa.seq2seq.sampler.TrainingSampler.next_inputs": true,
+ "tfa.seq2seq.sampler.TrainingSampler.sample": true,
+ "tfa.seq2seq.sampler.TrainingSampler.sample_ids_dtype": true,
+ "tfa.seq2seq.sampler.TrainingSampler.sample_ids_shape": true,
+ "tfa.seq2seq.sampler.absolute_import": true,
+ "tfa.seq2seq.sampler.bernoulli_sample": false,
+ "tfa.seq2seq.sampler.categorical_sample": false,
+ "tfa.seq2seq.sampler.division": true,
+ "tfa.seq2seq.sampler.print_function": true,
+ "tfa.seq2seq.sequence_loss": false,
+ "tfa.seq2seq.tile_batch": false,
+ "tfa.text": false,
+ "tfa.text.absolute_import": true,
+ "tfa.text.crf": false,
+ "tfa.text.crf.CrfDecodeForwardRnnCell": false,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__call__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__eq__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__ge__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__gt__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__init__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__le__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__lt__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__ne__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.__new__": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.activity_regularizer": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_loss": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_metric": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_update": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.add_weight": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.build": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.call": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_mask": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_output_shape": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.compute_output_signature": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.count_params": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.dtype": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.dynamic": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.from_config": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_config": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_initial_state": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_mask_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_input_shape_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_losses_for": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_mask_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_output_shape_at": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_updates_for": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.get_weights": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_mask": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_shape": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.input_spec": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.losses": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.metrics": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.name": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.name_scope": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.non_trainable_variables": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.non_trainable_weights": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output_mask": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output_shape": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.output_size": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.set_weights": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.state_size": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.submodules": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable_variables": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.trainable_weights": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.updates": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.variables": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.weights": true,
+ "tfa.text.crf.CrfDecodeForwardRnnCell.with_name_scope": true,
+ "tfa.text.crf.absolute_import": true,
+ "tfa.text.crf.crf_binary_score": false,
+ "tfa.text.crf.crf_decode": false,
+ "tfa.text.crf.crf_decode_backward": false,
+ "tfa.text.crf.crf_decode_forward": false,
+ "tfa.text.crf.crf_forward": false,
+ "tfa.text.crf.crf_log_likelihood": false,
+ "tfa.text.crf.crf_log_norm": false,
+ "tfa.text.crf.crf_multitag_sequence_score": false,
+ "tfa.text.crf.crf_sequence_score": false,
+ "tfa.text.crf.crf_unary_score": false,
+ "tfa.text.crf.division": true,
+ "tfa.text.crf.print_function": true,
+ "tfa.text.crf.viterbi_decode": false,
+ "tfa.text.crf_binary_score": false,
+ "tfa.text.crf_decode": false,
+ "tfa.text.crf_decode_backward": false,
+ "tfa.text.crf_decode_forward": false,
+ "tfa.text.crf_forward": false,
+ "tfa.text.crf_log_likelihood": false,
+ "tfa.text.crf_log_norm": false,
+ "tfa.text.crf_multitag_sequence_score": false,
+ "tfa.text.crf_sequence_score": false,
+ "tfa.text.crf_unary_score": false,
+ "tfa.text.division": true,
+ "tfa.text.parse_time": false,
+ "tfa.text.parse_time_op": false,
+ "tfa.text.parse_time_op.absolute_import": true,
+ "tfa.text.parse_time_op.division": true,
+ "tfa.text.parse_time_op.parse_time": false,
+ "tfa.text.parse_time_op.print_function": true,
+ "tfa.text.print_function": true,
+ "tfa.text.skip_gram_ops": false,
+ "tfa.text.skip_gram_ops.absolute_import": true,
+ "tfa.text.skip_gram_ops.division": true,
+ "tfa.text.skip_gram_ops.print_function": true,
+ "tfa.text.skip_gram_ops.skip_gram_sample": false,
+ "tfa.text.skip_gram_ops.skip_gram_sample_with_text_vocab": false,
+ "tfa.text.skip_gram_sample": false,
+ "tfa.text.skip_gram_sample_with_text_vocab": false,
+ "tfa.text.viterbi_decode": false
+ },
+ "py_module_names": [
+ "tfa"
+ ]
+}
diff --git a/docs/api_docs/python/tfa/activations.md b/docs/api_docs/python/tfa/activations.md
new file mode 100644
index 0000000000..7c6175d557
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations.md
@@ -0,0 +1,43 @@
+
+
+
+
+
+# Module: tfa.activations
+
+
+
+
+
+
+Additional activation functions.
+
+
+
+## Functions
+
+[`gelu(...)`](../tfa/activations/gelu.md): Gaussian Error Linear Unit.
+
+[`hardshrink(...)`](../tfa/activations/hardshrink.md): Hard shrink function.
+
+[`lisht(...)`](../tfa/activations/lisht.md): LiSHT: Non-Parametric Linearly Scaled Hyperbolic Tangent Activation Function.
+
+[`mish(...)`](../tfa/activations/mish.md): Mish: A Self Regularized Non-Monotonic Neural Activation Function.
+
+[`rrelu(...)`](../tfa/activations/rrelu.md): rrelu function.
+
+[`softshrink(...)`](../tfa/activations/softshrink.md): Soft shrink function.
+
+[`sparsemax(...)`](../tfa/activations/sparsemax.md): Sparsemax activation function [1].
+
+[`tanhshrink(...)`](../tfa/activations/tanhshrink.md): Applies the element-wise function: x - tanh(x)
+
+
+
diff --git a/docs/api_docs/python/tfa/activations/gelu.md b/docs/api_docs/python/tfa/activations/gelu.md
new file mode 100644
index 0000000000..13785a2471
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/gelu.md
@@ -0,0 +1,54 @@
+
+
+
+
+
+# tfa.activations.gelu
+
+
+
+
+
+
+
+
+Gaussian Error Linear Unit.
+
+``` python
+tfa.activations.gelu(
+ x,
+ approximate=True
+)
+```
+
+
+
+
+
+Computes gaussian error linear:
+`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or
+`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),
+depending on whether approximation is enabled.
+
+See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
+and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+* `approximate`: bool, whether to enable approximation.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/activations/hardshrink.md b/docs/api_docs/python/tfa/activations/hardshrink.md
new file mode 100644
index 0000000000..7232489187
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/hardshrink.md
@@ -0,0 +1,51 @@
+
+
+
+
+
+# tfa.activations.hardshrink
+
+
+
+
+
+
+
+
+Hard shrink function.
+
+``` python
+tfa.activations.hardshrink(
+ x,
+ lower=-0.5,
+ upper=0.5
+)
+```
+
+
+
+
+
+Computes hard shrink function:
+`x if x < lower or x > upper else 0`.
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+* `lower`: `float`, lower bound for setting values to zeros.
+* `upper`: `float`, upper bound for setting values to zeros.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/activations/lisht.md b/docs/api_docs/python/tfa/activations/lisht.md
new file mode 100644
index 0000000000..1d6534add5
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/lisht.md
@@ -0,0 +1,46 @@
+
+
+
+
+
+# tfa.activations.lisht
+
+
+
+
+
+
+
+
+LiSHT: Non-Parametric Linearly Scaled Hyperbolic Tangent Activation Function.
+
+``` python
+tfa.activations.lisht(x)
+```
+
+
+
+
+
+Computes linearly scaled hyperbolic tangent (LiSHT): `x * tanh(x)`
+
+See [LiSHT: Non-Parametric Linearly Scaled Hyperbolic Tangent Activation Function for Neural Networks](https://arxiv.org/abs/1901.05894).
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/activations/mish.md b/docs/api_docs/python/tfa/activations/mish.md
new file mode 100644
index 0000000000..6028af077c
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/mish.md
@@ -0,0 +1,46 @@
+
+
+
+
+
+# tfa.activations.mish
+
+
+
+
+
+
+
+
+Mish: A Self Regularized Non-Monotonic Neural Activation Function.
+
+``` python
+tfa.activations.mish(x)
+```
+
+
+
+
+
+Computes mish activation: x * tanh(softplus(x))
+
+See [Mish: A Self Regularized Non-Monotonic Neural Activation Function](https://arxiv.org/abs/1908.08681).
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/activations/rrelu.md b/docs/api_docs/python/tfa/activations/rrelu.md
new file mode 100644
index 0000000000..3ae4b027f6
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/rrelu.md
@@ -0,0 +1,60 @@
+
+
+
+
+
+# tfa.activations.rrelu
+
+
+
+
+
+
+
+
+rrelu function.
+
+``` python
+tfa.activations.rrelu(
+ x,
+ lower=0.125,
+ upper=0.3333333333333333,
+ training=None,
+ seed=None
+)
+```
+
+
+
+
+
+Computes rrelu function:
+`x if x > 0 else random(lower, upper) * x` or
+`x if x > 0 else x * (lower + upper) / 2`
+depending on whether training is enabled.
+
+See [Empirical Evaluation of Rectified Activations in Convolutional Network](https://arxiv.org/abs/1505.00853).
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+* `lower`: `float`, lower bound for random alpha.
+* `upper`: `float`, upper bound for random alpha.
+* `training`: `bool`, indicating whether the `call`
+is meant for training or inference.
+* `seed`: `int`, this sets the operation-level seed.
+
+#### Returns:
+
+
+* `result`: A `Tensor`. Has the same type as `x`.
+
diff --git a/docs/api_docs/python/tfa/activations/softshrink.md b/docs/api_docs/python/tfa/activations/softshrink.md
new file mode 100644
index 0000000000..0053dd7a27
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/softshrink.md
@@ -0,0 +1,51 @@
+
+
+
+
+
+# tfa.activations.softshrink
+
+
+
+
+
+
+
+
+Soft shrink function.
+
+``` python
+tfa.activations.softshrink(
+ x,
+ lower=-0.5,
+ upper=0.5
+)
+```
+
+
+
+
+
+Computes soft shrink function:
+`x - lower if x < lower, x - upper if x > upper else 0`.
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+* `lower`: `float`, lower bound for setting values to zeros.
+* `upper`: `float`, upper bound for setting values to zeros.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/activations/sparsemax.md b/docs/api_docs/python/tfa/activations/sparsemax.md
new file mode 100644
index 0000000000..ae30557842
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/sparsemax.md
@@ -0,0 +1,58 @@
+
+
+
+
+
+# tfa.activations.sparsemax
+
+
+
+
+
+
+
+
+Sparsemax activation function [1].
+
+**Aliases**: `tfa.layers.sparsemax.sparsemax`
+
+``` python
+tfa.activations.sparsemax(
+ logits,
+ axis=-1
+)
+```
+
+
+
+
+
+For each batch `i` and class `j` we have
+ $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
+
+[1]: https://arxiv.org/abs/1602.02068
+
+#### Args:
+
+
+* `logits`: Input tensor.
+* `axis`: Integer, axis along which the sparsemax operation is applied.
+
+#### Returns:
+
+Tensor, output of sparsemax transformation. Has the same type and
+shape as `logits`.
+
+
+#### Raises:
+
+
+* `ValueError`: In case `dim(logits) == 1`.
+
diff --git a/docs/api_docs/python/tfa/activations/tanhshrink.md b/docs/api_docs/python/tfa/activations/tanhshrink.md
new file mode 100644
index 0000000000..f22d193fd0
--- /dev/null
+++ b/docs/api_docs/python/tfa/activations/tanhshrink.md
@@ -0,0 +1,43 @@
+
+
+
+
+
+# tfa.activations.tanhshrink
+
+
+
+
+
+
+
+
+Applies the element-wise function: x - tanh(x)
+
+``` python
+tfa.activations.tanhshrink(x)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `x`: A `Tensor`. Must be one of the following types:
+ `float16`, `float32`, `float64`.
+
+#### Returns:
+
+A `Tensor`. Has the same type as `x`.
+
+
diff --git a/docs/api_docs/python/tfa/callbacks.md b/docs/api_docs/python/tfa/callbacks.md
new file mode 100644
index 0000000000..7fa89bad16
--- /dev/null
+++ b/docs/api_docs/python/tfa/callbacks.md
@@ -0,0 +1,37 @@
+
+
+
+
+
+# Module: tfa.callbacks
+
+
+
+
+
+
+Additional callbacks that conform to Keras API.
+
+
+
+## Modules
+
+[`time_stopping`](../tfa/callbacks/time_stopping.md) module: Callback that stops training when a specified amount of time has passed.
+
+[`tqdm_progress_bar`](../tfa/callbacks/tqdm_progress_bar.md) module: TQDM Progress Bar.
+
+## Classes
+
+[`class TQDMProgressBar`](../tfa/callbacks/TQDMProgressBar.md): TQDM Progress Bar for Tensorflow Keras.
+
+[`class TimeStopping`](../tfa/callbacks/TimeStopping.md): Stop training when a specified amount of time has passed.
+
+
+
diff --git a/docs/api_docs/python/tfa/callbacks/TQDMProgressBar.md b/docs/api_docs/python/tfa/callbacks/TQDMProgressBar.md
new file mode 100644
index 0000000000..220ecb8233
--- /dev/null
+++ b/docs/api_docs/python/tfa/callbacks/TQDMProgressBar.md
@@ -0,0 +1,143 @@
+
+
+
+
+
+
+
+
+
+
+# tfa.callbacks.TQDMProgressBar
+
+
+
+
+
+
+
+
+## Class `TQDMProgressBar`
+
+TQDM Progress Bar for Tensorflow Keras.
+
+
+
+**Aliases**: `tfa.callbacks.tqdm_progress_bar.TQDMProgressBar`
+
+
+
+
+#### Arguments:
+
+metrics_separator (string): Custom separator between metrics.
+ Defaults to ' - '
+overall_bar_format (string format): Custom bar format for overall
+ (outer) progress bar, see https://github.com/tqdm/tqdm#parameters
+ for more detail.
+epoch_bar_format (string format): Custom bar format for epoch
+ (inner) progress bar, see https://github.com/tqdm/tqdm#parameters
+ for more detail.
+update_per_second (int): Maximum number of updates in the epochs bar
+ per second, this is to prevent small batches from slowing down
+ training. Defaults to 10.
+leave_epoch_progress (bool): True to leave epoch progress bars
+leave_overall_progress (bool): True to leave overall progress bar
+show_epoch_progress (bool): False to hide epoch progress bars
+show_overall_progress (bool): False to hide overall progress bar
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ metrics_separator=' - ',
+ overall_bar_format='{l_bar}{bar} {n_fmt}/{total_fmt} ETA: {remaining}s, {rate_fmt}{postfix}',
+ epoch_bar_format='{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}',
+ update_per_second=10,
+ leave_epoch_progress=True,
+ leave_overall_progress=True,
+ show_epoch_progress=True,
+ show_overall_progress=True
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+
+
+View source
+
+``` python
+format_metrics(
+ logs={},
+ factor=1
+)
+```
+
+Format metrics in logs into a string.
+
+
+#### Arguments:
+
+
+* `logs`: dictionary of metrics and their values. Defaults to
+ empty dictionary.
+factor (int): The factor we want to divide the metrics in logs
+ by, useful when we are computing the logs after each batch.
+ Defaults to 1.
+
+
+#### Returns:
+
+
+* `metrics_string`: a string displaying metrics using the given
+formatters passed in through the constructor.
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+set_model
+
+``` python
+set_model(model)
+```
+
+
+
+
+set_params
+
+``` python
+set_params(params)
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/callbacks/TimeStopping.md b/docs/api_docs/python/tfa/callbacks/TimeStopping.md
new file mode 100644
index 0000000000..44e17fca2b
--- /dev/null
+++ b/docs/api_docs/python/tfa/callbacks/TimeStopping.md
@@ -0,0 +1,94 @@
+
+
+
+
+
+
+
+
+
+# tfa.callbacks.TimeStopping
+
+
+
+
+
+
+
+
+## Class `TimeStopping`
+
+Stop training when a specified amount of time has passed.
+
+
+
+**Aliases**: `tfa.callbacks.time_stopping.TimeStopping`
+
+
+
+
+#### Args:
+
+
+* `seconds`: maximum amount of time before stopping.
+ Defaults to 86400 (1 day).
+* `verbose`: verbosity mode. Defaults to 0.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ seconds=86400,
+ verbose=0
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+set_model
+
+``` python
+set_model(model)
+```
+
+
+
+
+set_params
+
+``` python
+set_params(params)
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/callbacks/time_stopping.md b/docs/api_docs/python/tfa/callbacks/time_stopping.md
new file mode 100644
index 0000000000..a27d3b5381
--- /dev/null
+++ b/docs/api_docs/python/tfa/callbacks/time_stopping.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.callbacks.time_stopping
+
+
+
+
+
+
+Callback that stops training when a specified amount of time has passed.
+
+
+
+## Classes
+
+[`class TimeStopping`](../../tfa/callbacks/TimeStopping.md): Stop training when a specified amount of time has passed.
+
+
+
diff --git a/docs/api_docs/python/tfa/callbacks/tqdm_progress_bar.md b/docs/api_docs/python/tfa/callbacks/tqdm_progress_bar.md
new file mode 100644
index 0000000000..68686bd5e0
--- /dev/null
+++ b/docs/api_docs/python/tfa/callbacks/tqdm_progress_bar.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.callbacks.tqdm_progress_bar
+
+
+
+
+
+
+TQDM Progress Bar.
+
+
+
+## Classes
+
+[`class TQDMProgressBar`](../../tfa/callbacks/TQDMProgressBar.md): TQDM Progress Bar for Tensorflow Keras.
+
+
+
diff --git a/docs/api_docs/python/tfa/image.md b/docs/api_docs/python/tfa/image.md
new file mode 100644
index 0000000000..3ff7284461
--- /dev/null
+++ b/docs/api_docs/python/tfa/image.md
@@ -0,0 +1,71 @@
+
+
+
+
+
+# Module: tfa.image
+
+
+
+
+
+
+Additional image manipulation ops.
+
+
+
+## Modules
+
+[`distance_transform`](../tfa/image/distance_transform.md) module: Distance transform ops.
+
+[`distort_image_ops`](../tfa/image/distort_image_ops.md) module: Python layer for distort_image_ops.
+
+[`filters`](../tfa/image/filters.md) module
+
+[`resampler_ops`](../tfa/image/resampler_ops.md) module: Python layer for Resampler.
+
+[`transform_ops`](../tfa/image/transform_ops.md) module: Image transform ops.
+
+[`translate_ops`](../tfa/image/translate_ops.md) module: Image translate ops.
+
+[`utils`](../tfa/image/utils.md) module: Image util ops.
+
+## Functions
+
+[`adjust_hsv_in_yiq(...)`](../tfa/image/adjust_hsv_in_yiq.md): Adjust hue, saturation, value of an RGB image in YIQ color space.
+
+[`connected_components(...)`](../tfa/image/connected_components.md): Labels the connected components in a batch of images.
+
+[`dense_image_warp(...)`](../tfa/image/dense_image_warp.md): Image warping using per-pixel flow vectors.
+
+[`euclidean_dist_transform(...)`](../tfa/image/euclidean_dist_transform.md): Applies euclidean distance transform(s) to the image(s).
+
+[`interpolate_bilinear(...)`](../tfa/image/interpolate_bilinear.md): Similar to Matlab's interp2 function.
+
+[`interpolate_spline(...)`](../tfa/image/interpolate_spline.md): Interpolate signal using polyharmonic interpolation.
+
+[`mean_filter2d(...)`](../tfa/image/mean_filter2d.md): Perform mean filtering on image(s).
+
+[`median_filter2d(...)`](../tfa/image/median_filter2d.md): Perform median filtering on image(s).
+
+[`random_hsv_in_yiq(...)`](../tfa/image/random_hsv_in_yiq.md): Adjust hue, saturation, value of an RGB image randomly in YIQ color space.
+
+[`resampler(...)`](../tfa/image/resampler.md): Resamples input data at user defined coordinates.
+
+[`rotate(...)`](../tfa/image/rotate.md): Rotate image(s) counterclockwise by the passed angle(s) in radians.
+
+[`sparse_image_warp(...)`](../tfa/image/sparse_image_warp.md): Image warping using correspondences between sparse control points.
+
+[`transform(...)`](../tfa/image/transform.md): Applies the given transform(s) to the image(s).
+
+[`translate(...)`](../tfa/image/translate.md): Translate image(s) by the passed vectors(s).
+
+
+
diff --git a/docs/api_docs/python/tfa/image/adjust_hsv_in_yiq.md b/docs/api_docs/python/tfa/image/adjust_hsv_in_yiq.md
new file mode 100644
index 0000000000..c1b29b507b
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/adjust_hsv_in_yiq.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+# tfa.image.adjust_hsv_in_yiq
+
+
+
+
+
+
+
+
+Adjust hue, saturation, value of an RGB image in YIQ color space.
+
+**Aliases**: `tfa.image.distort_image_ops.adjust_hsv_in_yiq`
+
+``` python
+tfa.image.adjust_hsv_in_yiq(
+ image,
+ delta_hue=0,
+ scale_saturation=1,
+ scale_value=1,
+ name=None
+)
+```
+
+
+
+
+
+This is a convenience method that converts an RGB image to float
+representation, converts it to YIQ, rotates the color around the
+Y channel by delta_hue in radians, scales the chrominance channels
+(I, Q) by scale_saturation, scales all channels (Y, I, Q) by scale_value,
+converts back to RGB, and then back to the original data type.
+
+`image` is an RGB image. The image hue is adjusted by converting the
+image to YIQ, rotating around the luminance channel (Y) by
+`delta_hue` in radians, multiplying the chrominance channels (I, Q) by
+`scale_saturation`, and multiplying all channels (Y, I, Q) by
+`scale_value`. The image is then converted back to RGB.
+
+#### Args:
+
+
+* `image`: RGB image or images. Size of the last dimension must be 3.
+* `delta_hue`: float, the hue rotation amount, in radians.
+* `scale_saturation`: float, factor to multiply the saturation by.
+* `scale_value`: float, factor to multiply the value by.
+* `name`: A name for this operation (optional).
+
+
+#### Returns:
+
+Adjusted image(s), same shape and dtype as `image`.
+
+
diff --git a/docs/api_docs/python/tfa/image/connected_components.md b/docs/api_docs/python/tfa/image/connected_components.md
new file mode 100644
index 0000000000..8f7961ce4b
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/connected_components.md
@@ -0,0 +1,56 @@
+
+
+
+
+
+# tfa.image.connected_components
+
+
+
+
+
+
+
+
+Labels the connected components in a batch of images.
+
+``` python
+tfa.image.connected_components(
+ images,
+ name=None
+)
+```
+
+
+
+
+
+A component is a set of pixels in a single input image, which are
+all adjacent and all have the same non-zero value. The components
+using a squared connectivity of one (all True entries are joined with
+their neighbors above, below, left, and right). Components across all
+images have consecutive ids 1 through n.
+Components are labeled according to the first pixel of the
+component appearing in row-major order (lexicographic order by
+image_index_in_batch, row, col).
+Zero entries all have an output id of 0.
+This op is equivalent to `scipy.ndimage.measurements.label`
+on a 2D array with the default structuring element
+(which is the connectivity used here).
+Args:
+ images: A 2D (H, W) or 3D (N, H, W) Tensor of boolean image(s).
+ name: The name of the op.
+Returns:
+ Components with the same shape as `images`.
+ False entries in `images` have value 0, and
+ all True entries map to a component id > 0.
+Raises:
+ TypeError: if `images` is not 2D or 3D.
+
diff --git a/docs/api_docs/python/tfa/image/dense_image_warp.md b/docs/api_docs/python/tfa/image/dense_image_warp.md
new file mode 100644
index 0000000000..0cf8161bb9
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/dense_image_warp.md
@@ -0,0 +1,71 @@
+
+
+
+
+
+# tfa.image.dense_image_warp
+
+
+
+
+
+
+
+
+Image warping using per-pixel flow vectors.
+
+``` python
+tfa.image.dense_image_warp(
+ image,
+ flow,
+ name=None
+)
+```
+
+
+
+
+
+Apply a non-linear warp to the image, where the warp is specified by a
+dense flow field of offset vectors that define the correspondences of
+pixel values in the output image back to locations in the source image.
+Specifically, the pixel value at output[b, j, i, c] is
+images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
+
+The locations specified by this formula do not necessarily map to an int
+index. Therefore, the pixel value is obtained by bilinear
+interpolation of the 4 nearest pixels around
+(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside
+of the image, we use the nearest pixel values at the image boundary.
+
+#### Args:
+
+
+* `image`: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
+* `flow`: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
+* `name`: A name for the operation (optional).
+
+Note that image and flow can be of type tf.half, tf.float32, or
+tf.float64, and do not necessarily have to be the same type.
+
+
+#### Returns:
+
+A 4-D float `Tensor` with shape `[batch, height, width, channels]`
+ and same type as input image.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if height < 2 or width < 2 or the inputs have the wrong
+ number of dimensions.
+
diff --git a/docs/api_docs/python/tfa/image/distance_transform.md b/docs/api_docs/python/tfa/image/distance_transform.md
new file mode 100644
index 0000000000..aa8f2992f0
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/distance_transform.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.image.distance_transform
+
+
+
+
+
+
+Distance transform ops.
+
+
+
+## Functions
+
+[`euclidean_dist_transform(...)`](../../tfa/image/euclidean_dist_transform.md): Applies euclidean distance transform(s) to the image(s).
+
+
+
diff --git a/docs/api_docs/python/tfa/image/distort_image_ops.md b/docs/api_docs/python/tfa/image/distort_image_ops.md
new file mode 100644
index 0000000000..a263e5e3bc
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/distort_image_ops.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.image.distort_image_ops
+
+
+
+
+
+
+Python layer for distort_image_ops.
+
+
+
+## Functions
+
+[`adjust_hsv_in_yiq(...)`](../../tfa/image/adjust_hsv_in_yiq.md): Adjust hue, saturation, value of an RGB image in YIQ color space.
+
+[`random_hsv_in_yiq(...)`](../../tfa/image/random_hsv_in_yiq.md): Adjust hue, saturation, value of an RGB image randomly in YIQ color space.
+
+
+
diff --git a/docs/api_docs/python/tfa/image/euclidean_dist_transform.md b/docs/api_docs/python/tfa/image/euclidean_dist_transform.md
new file mode 100644
index 0000000000..dea8790f6a
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/euclidean_dist_transform.md
@@ -0,0 +1,62 @@
+
+
+
+
+
+# tfa.image.euclidean_dist_transform
+
+
+
+
+
+
+
+
+Applies euclidean distance transform(s) to the image(s).
+
+**Aliases**: `tfa.image.distance_transform.euclidean_dist_transform`
+
+``` python
+tfa.image.euclidean_dist_transform(
+ images,
+ dtype=tf.float32,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `images`: A tensor of shape (num_images, num_rows, num_columns, 1) (NHWC),
+ or (num_rows, num_columns, 1) (HWC) or (num_rows, num_columns) (HW).
+* `dtype`: DType of the output tensor.
+* `name`: The name of the op.
+
+
+#### Returns:
+
+Image(s) with the type `dtype` and same shape as `images`, with the
+transform applied. If a tensor of all ones is given as input, the
+output tensor will be filled with the max value of the `dtype`.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `image` is not tf.uint8, or `dtype` is not floating point.
+* `ValueError`: If `image` has more than one channel, or `image` is not of
+ rank between 2 and 4.
+
diff --git a/docs/api_docs/python/tfa/image/filters.md b/docs/api_docs/python/tfa/image/filters.md
new file mode 100644
index 0000000000..dbc512e0a6
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/filters.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.image.filters
+
+
+
+
+
+
+
+
+
+
+## Functions
+
+[`mean_filter2d(...)`](../../tfa/image/mean_filter2d.md): Perform mean filtering on image(s).
+
+[`median_filter2d(...)`](../../tfa/image/median_filter2d.md): Perform median filtering on image(s).
+
+
+
diff --git a/docs/api_docs/python/tfa/image/interpolate_bilinear.md b/docs/api_docs/python/tfa/image/interpolate_bilinear.md
new file mode 100644
index 0000000000..ff86a2f0d7
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/interpolate_bilinear.md
@@ -0,0 +1,61 @@
+
+
+
+
+
+# tfa.image.interpolate_bilinear
+
+
+
+
+
+
+
+
+Similar to Matlab's interp2 function.
+
+``` python
+tfa.image.interpolate_bilinear(
+ grid,
+ query_points,
+ indexing='ij',
+ name=None
+)
+```
+
+
+
+
+
+Finds values for query points on a grid using bilinear interpolation.
+
+#### Args:
+
+
+* `grid`: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
+* `query_points`: a 3-D float `Tensor` of N points with shape
+ `[batch, N, 2]`.
+* `indexing`: whether the query points are specified as row and column (ij),
+ or Cartesian coordinates (xy).
+* `name`: a name for the operation (optional).
+
+
+#### Returns:
+
+
+* `values`: a 3-D `Tensor` with shape `[batch, N, channels]`
+
+
+#### Raises:
+
+
+* `ValueError`: if the indexing mode is invalid, or if the shape of the
+ inputs invalid.
+
diff --git a/docs/api_docs/python/tfa/image/interpolate_spline.md b/docs/api_docs/python/tfa/image/interpolate_spline.md
new file mode 100644
index 0000000000..05ad23f123
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/interpolate_spline.md
@@ -0,0 +1,96 @@
+
+
+
+
+
+# tfa.image.interpolate_spline
+
+
+
+
+
+
+
+
+Interpolate signal using polyharmonic interpolation.
+
+``` python
+tfa.image.interpolate_spline(
+ train_points,
+ train_values,
+ query_points,
+ order,
+ regularization_weight=0.0,
+ name='interpolate_spline'
+)
+```
+
+
+
+
+
+The interpolant has the form
+$$f(x) = \sum_{i = 1}^n w_i \phi(||x - c_i||) + v^T x + b.$$
+
+This is a sum of two terms: (1) a weighted sum of radial basis function
+(RBF) terms, with the centers \\(c_1, ... c_n\\), and (2) a linear term
+with a bias. The \\(c_i\\) vectors are 'training' points.
+In the code, b is absorbed into v
+by appending 1 as a final dimension to x. The coefficients w and v are
+estimated such that the interpolant exactly fits the value of the function
+at the \\(c_i\\) points, the vector w is orthogonal to each \\(c_i\\),
+and the vector w sums to 0. With these constraints, the coefficients
+can be obtained by solving a linear system.
+
+\\(\phi\\) is an RBF, parametrized by an interpolation
+order. Using order=2 produces the well-known thin-plate spline.
+
+We also provide the option to perform regularized interpolation. Here, the
+interpolant is selected to trade off between the squared loss on the
+training data and a certain measure of its curvature
+([details](https://en.wikipedia.org/wiki/Polyharmonic_spline)).
+Using a regularization weight greater than zero has the effect that the
+interpolant will no longer exactly fit the training data. However, it may
+be less vulnerable to overfitting, particularly for high-order
+interpolation.
+
+Note the interpolation procedure is differentiable with respect to all
+inputs besides the order parameter.
+
+We support dynamically-shaped inputs, where batch_size, n, and m are None
+at graph construction time. However, d and k must be known.
+
+#### Args:
+
+
+* `train_points`: `[batch_size, n, d]` float `Tensor` of n d-dimensional
+ locations. These do not need to be regularly-spaced.
+* `train_values`: `[batch_size, n, k]` float `Tensor` of n c-dimensional
+ values evaluated at train_points.
+* `query_points`: `[batch_size, m, d]` `Tensor` of m d-dimensional locations
+ where we will output the interpolant's values.
+* `order`: order of the interpolation. Common values are 1 for
+ \\(\phi(r) = r\\), 2 for \\(\phi(r) = r^2 * log(r)\\)
+ (thin-plate spline), or 3 for \\(\phi(r) = r^3\\).
+* `regularization_weight`: weight placed on the regularization term.
+ This will depend substantially on the problem, and it should always be
+ tuned. For many problems, it is reasonable to use no regularization.
+ If using a non-zero value, we recommend a small value like 0.001.
+* `name`: name prefix for ops created by this function
+
+
+#### Returns:
+
+`[b, m, k]` float `Tensor` of query values. We use train_points and
+train_values to perform polyharmonic interpolation. The query values are
+the values of the interpolant evaluated at the locations specified in
+query_points.
+
+
diff --git a/docs/api_docs/python/tfa/image/mean_filter2d.md b/docs/api_docs/python/tfa/image/mean_filter2d.md
new file mode 100644
index 0000000000..895435d873
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/mean_filter2d.md
@@ -0,0 +1,69 @@
+
+
+
+
+
+# tfa.image.mean_filter2d
+
+
+
+
+
+
+
+
+Perform mean filtering on image(s).
+
+**Aliases**: `tfa.image.filters.mean_filter2d`
+
+``` python
+tfa.image.mean_filter2d(
+ image,
+ filter_shape=(3, 3),
+ padding='REFLECT',
+ constant_values=0,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `image`: Either a 2-D `Tensor` of shape `[height, width]`,
+ a 3-D `Tensor` of shape `[height, width, channels]`,
+ or a 4-D `Tensor` of shape `[batch_size, height, width, channels]`.
+* `filter_shape`: An `integer` or `tuple`/`list` of 2 integers, specifying
+ the height and width of the 2-D mean filter. Can be a single integer
+ to specify the same value for all spatial dimensions.
+* `padding`: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC".
+ The type of padding algorithm to use, which is compatible with
+ `mode` argument in `tf.pad`. For more details, please refer to
+ https://www.tensorflow.org/api_docs/python/tf/pad.
+* `constant_values`: A `scalar`, the pad value to use in "CONSTANT"
+ padding mode.
+* `name`: A name for this operation (optional).
+
+#### Returns:
+
+3-D or 4-D `Tensor` of the same dtype as input.
+
+
+#### Raises:
+
+
+* `ValueError`: If `image` is not 2, 3 or 4-dimensional,
+ if `padding` is other than "REFLECT", "CONSTANT" or "SYMMETRIC",
+ or if `filter_shape` is invalid.
+
diff --git a/docs/api_docs/python/tfa/image/median_filter2d.md b/docs/api_docs/python/tfa/image/median_filter2d.md
new file mode 100644
index 0000000000..37b08295e6
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/median_filter2d.md
@@ -0,0 +1,69 @@
+
+
+
+
+
+# tfa.image.median_filter2d
+
+
+
+
+
+
+
+
+Perform median filtering on image(s).
+
+**Aliases**: `tfa.image.filters.median_filter2d`
+
+``` python
+tfa.image.median_filter2d(
+ image,
+ filter_shape=(3, 3),
+ padding='REFLECT',
+ constant_values=0,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `image`: Either a 2-D `Tensor` of shape `[height, width]`,
+ a 3-D `Tensor` of shape `[height, width, channels]`,
+ or a 4-D `Tensor` of shape `[batch_size, height, width, channels]`.
+* `filter_shape`: An `integer` or `tuple`/`list` of 2 integers, specifying
+ the height and width of the 2-D median filter. Can be a single integer
+ to specify the same value for all spatial dimensions.
+* `padding`: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC".
+ The type of padding algorithm to use, which is compatible with
+ `mode` argument in `tf.pad`. For more details, please refer to
+ https://www.tensorflow.org/api_docs/python/tf/pad.
+* `constant_values`: A `scalar`, the pad value to use in "CONSTANT"
+ padding mode.
+* `name`: A name for this operation (optional).
+
+#### Returns:
+
+3-D or 4-D `Tensor` of the same dtype as input.
+
+
+#### Raises:
+
+
+* `ValueError`: If `image` is not 2, 3 or 4-dimensional,
+ if `padding` is other than "REFLECT", "CONSTANT" or "SYMMETRIC",
+ or if `filter_shape` is invalid.
+
diff --git a/docs/api_docs/python/tfa/image/random_hsv_in_yiq.md b/docs/api_docs/python/tfa/image/random_hsv_in_yiq.md
new file mode 100644
index 0000000000..b73e1a05fb
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/random_hsv_in_yiq.md
@@ -0,0 +1,78 @@
+
+
+
+
+
+# tfa.image.random_hsv_in_yiq
+
+
+
+
+
+
+
+
+Adjust hue, saturation, value of an RGB image randomly in YIQ color
+
+**Aliases**: `tfa.image.distort_image_ops.random_hsv_in_yiq`
+
+``` python
+tfa.image.random_hsv_in_yiq(
+ image,
+ max_delta_hue=0,
+ lower_saturation=1,
+ upper_saturation=1,
+ lower_value=1,
+ upper_value=1,
+ seed=None,
+ name=None
+)
+```
+
+
+
+
+space.
+
+Equivalent to `adjust_yiq_hsv()` but uses a `delta_h` randomly
+picked in the interval `[-max_delta_hue, max_delta_hue]`, a
+`scale_saturation` randomly picked in the interval
+`[lower_saturation, upper_saturation]`, and a `scale_value`
+randomly picked in the interval `[lower_value, upper_value]`.
+
+#### Args:
+
+
+* `image`: RGB image or images. Size of the last dimension must be 3.
+* `max_delta_hue`: float. Maximum value for the random delta_hue. Passing 0
+ disables adjusting hue.
+* `lower_saturation`: float. Lower bound for the random scale_saturation.
+* `upper_saturation`: float. Upper bound for the random scale_saturation.
+* `lower_value`: float. Lower bound for the random scale_value.
+* `upper_value`: float. Upper bound for the random scale_value.
+* `seed`: An operation-specific seed. It will be used in conjunction
+ with the graph-level seed to determine the real seeds that will be
+ used in this operation. Please see the documentation of
+ set_random_seed for its interaction with the graph-level random seed.
+* `name`: A name for this operation (optional).
+
+
+#### Returns:
+
+3-D float tensor of shape `[height, width, channels]`.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if `max_delta`, `lower_saturation`, `upper_saturation`,
+ `lower_value`, or `upper_value` is invalid.
+
diff --git a/docs/api_docs/python/tfa/image/resampler.md b/docs/api_docs/python/tfa/image/resampler.md
new file mode 100644
index 0000000000..a49b055919
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/resampler.md
@@ -0,0 +1,57 @@
+
+
+
+
+
+# tfa.image.resampler
+
+
+
+
+
+
+
+
+Resamples input data at user defined coordinates.
+
+**Aliases**: `tfa.image.resampler_ops.resampler`
+
+``` python
+tfa.image.resampler(
+ data,
+ warp,
+ name=None
+)
+```
+
+
+
+
+
+The resampler currently only supports bilinear interpolation of 2D data.
+Args:
+ data: Tensor of shape `[batch_size, data_height, data_width,
+ data_num_channels]` containing 2D data that will be resampled.
+ warp: Tensor of minimum rank 2 containing the coordinates at
+ which resampling will be performed. Since only bilinear
+ interpolation is currently supported, the last dimension of the
+ `warp` tensor must be 2, representing the (x, y) coordinate where
+ x is the index for width and y is the index for height.
+ name: Optional name of the op.
+Returns:
+ Tensor of resampled values from `data`. The output tensor shape
+ is determined by the shape of the warp tensor. For example, if `data`
+ is of shape `[batch_size, data_height, data_width, data_num_channels]`
+ and warp of shape `[batch_size, dim_0, ... , dim_n, 2]` the output will
+ be of shape `[batch_size, dim_0, ... , dim_n, data_num_channels]`.
+Raises:
+ ImportError: if the wrapper generated during compilation is not
+ present when the function is called.
+
diff --git a/docs/api_docs/python/tfa/image/resampler_ops.md b/docs/api_docs/python/tfa/image/resampler_ops.md
new file mode 100644
index 0000000000..b02b48bcb8
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/resampler_ops.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.image.resampler_ops
+
+
+
+
+
+
+Python layer for Resampler.
+
+
+
+## Functions
+
+[`resampler(...)`](../../tfa/image/resampler.md): Resamples input data at user defined coordinates.
+
+
+
diff --git a/docs/api_docs/python/tfa/image/rotate.md b/docs/api_docs/python/tfa/image/rotate.md
new file mode 100644
index 0000000000..ca0a49ce89
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/rotate.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+# tfa.image.rotate
+
+
+
+
+
+
+
+
+Rotate image(s) counterclockwise by the passed angle(s) in radians.
+
+**Aliases**: `tfa.image.transform_ops.rotate`
+
+``` python
+tfa.image.rotate(
+ images,
+ angles,
+ interpolation='NEAREST',
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `images`: A tensor of shape
+ (num_images, num_rows, num_columns, num_channels)
+ (NHWC), (num_rows, num_columns, num_channels) (HWC), or
+ (num_rows, num_columns) (HW).
+* `angles`: A scalar angle to rotate all images by, or (if images has rank 4)
+ a vector of length num_images, with an angle for each image in the
+ batch.
+* `interpolation`: Interpolation mode. Supported values: "NEAREST",
+ "BILINEAR".
+* `name`: The name of the op.
+
+
+#### Returns:
+
+Image(s) with the same type and shape as `images`, rotated by the given
+angle(s). Empty space due to the rotation will be filled with zeros.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `image` is an invalid type.
+
diff --git a/docs/api_docs/python/tfa/image/sparse_image_warp.md b/docs/api_docs/python/tfa/image/sparse_image_warp.md
new file mode 100644
index 0000000000..e80f55152b
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/sparse_image_warp.md
@@ -0,0 +1,91 @@
+
+
+
+
+
+# tfa.image.sparse_image_warp
+
+
+
+
+
+
+
+
+Image warping using correspondences between sparse control points.
+
+``` python
+tfa.image.sparse_image_warp(
+ image,
+ source_control_point_locations,
+ dest_control_point_locations,
+ interpolation_order=2,
+ regularization_weight=0.0,
+ num_boundary_points=0,
+ name='sparse_image_warp'
+)
+```
+
+
+
+
+
+Apply a non-linear warp to the image, where the warp is specified by
+the source and destination locations of a (potentially small) number of
+control points. First, we use a polyharmonic spline
+(`tf.contrib.image.interpolate_spline`) to interpolate the displacements
+between the corresponding control points to a dense flow field.
+Then, we warp the image using this dense flow field
+(`tf.contrib.image.dense_image_warp`).
+
+Let t index our control points. For regularization_weight=0, we have:
+warped_image[b, dest_control_point_locations[b, t, 0],
+ dest_control_point_locations[b, t, 1], :] =
+image[b, source_control_point_locations[b, t, 0],
+ source_control_point_locations[b, t, 1], :].
+
+For regularization_weight > 0, this condition is met approximately, since
+regularized interpolation trades off smoothness of the interpolant vs.
+reconstruction of the interpolant at the control points.
+See `tf.contrib.image.interpolate_spline` for further documentation of the
+interpolation_order and regularization_weight arguments.
+
+
+#### Args:
+
+
+* `image`: `[batch, height, width, channels]` float `Tensor`
+* `source_control_point_locations`: `[batch, num_control_points, 2]` float
+ `Tensor`
+* `dest_control_point_locations`: `[batch, num_control_points, 2]` float
+ `Tensor`
+* `interpolation_order`: polynomial order used by the spline interpolation
+* `regularization_weight`: weight on smoothness regularizer in interpolation
+* `num_boundary_points`: How many zero-flow boundary points to include at
+ each image edge. Usage:
+ num_boundary_points=0: don't add zero-flow points
+ num_boundary_points=1: 4 corners of the image
+ num_boundary_points=2: 4 corners and one in the middle of each edge
+ (8 points total)
+ num_boundary_points=n: 4 corners and n-1 along each edge
+* `name`: A name for the operation (optional).
+
+Note that image and offsets can be of type tf.half, tf.float32, or
+tf.float64, and do not necessarily have to be the same type.
+
+
+#### Returns:
+
+
+* `warped_image`: `[batch, height, width, channels]` float `Tensor` with same
+ type as input image.
+* `flow_field`: `[batch, height, width, 2]` float `Tensor` containing the
+ dense flow field produced by the interpolation.
+
diff --git a/docs/api_docs/python/tfa/image/transform.md b/docs/api_docs/python/tfa/image/transform.md
new file mode 100644
index 0000000000..c577dc57e2
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform.md
@@ -0,0 +1,76 @@
+
+
+
+
+
+# tfa.image.transform
+
+
+
+
+
+
+
+
+Applies the given transform(s) to the image(s).
+
+**Aliases**: `tfa.image.transform_ops.transform`, `tfa.image.translate_ops.transform`
+
+``` python
+tfa.image.transform(
+ images,
+ transforms,
+ interpolation='NEAREST',
+ output_shape=None,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `images`: A tensor of shape (num_images, num_rows, num_columns,
+ num_channels) (NHWC), (num_rows, num_columns, num_channels) (HWC), or
+ (num_rows, num_columns) (HW).
+* `transforms`: Projective transform matrix/matrices. A vector of length 8 or
+ tensor of size N x 8. If one row of transforms is
+ [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
+ `(x, y)` to a transformed *input* point
+ `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
+ where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
+ the transform mapping input points to output points. Note that
+ gradients are not backpropagated into transformation parameters.
+* `interpolation`: Interpolation mode.
+ Supported values: "NEAREST", "BILINEAR".
+* `output_shape`: Output dimension after the transform, [height, width].
+ If None, output is the same size as input image.
+
+* `name`: The name of the op.
+
+
+#### Returns:
+
+Image(s) with the same type and shape as `images`, with the given
+transform(s) applied. Transformed coordinates outside of the input image
+will be filled with zeros.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `image` is an invalid type.
+* `ValueError`: If output shape is not 1-D int32 Tensor.
+
diff --git a/docs/api_docs/python/tfa/image/transform_ops.md b/docs/api_docs/python/tfa/image/transform_ops.md
new file mode 100644
index 0000000000..c7eaa46a48
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform_ops.md
@@ -0,0 +1,39 @@
+
+
+
+
+
+# Module: tfa.image.transform_ops
+
+
+
+
+
+
+Image transform ops.
+
+
+
+## Functions
+
+[`angles_to_projective_transforms(...)`](../../tfa/image/transform_ops/angles_to_projective_transforms.md): Returns projective transform(s) for the given angle(s).
+
+[`compose_transforms(...)`](../../tfa/image/transform_ops/compose_transforms.md): Composes the transforms tensors.
+
+[`flat_transforms_to_matrices(...)`](../../tfa/image/transform_ops/flat_transforms_to_matrices.md): Converts projective transforms to affine matrices.
+
+[`matrices_to_flat_transforms(...)`](../../tfa/image/transform_ops/matrices_to_flat_transforms.md): Converts affine matrices to projective transforms.
+
+[`rotate(...)`](../../tfa/image/rotate.md): Rotate image(s) counterclockwise by the passed angle(s) in radians.
+
+[`transform(...)`](../../tfa/image/transform.md): Applies the given transform(s) to the image(s).
+
+
+
diff --git a/docs/api_docs/python/tfa/image/transform_ops/angles_to_projective_transforms.md b/docs/api_docs/python/tfa/image/transform_ops/angles_to_projective_transforms.md
new file mode 100644
index 0000000000..3f88240454
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform_ops/angles_to_projective_transforms.md
@@ -0,0 +1,53 @@
+
+
+
+
+
+# tfa.image.transform_ops.angles_to_projective_transforms
+
+
+
+
+
+
+
+
+Returns projective transform(s) for the given angle(s).
+
+``` python
+tfa.image.transform_ops.angles_to_projective_transforms(
+ angles,
+ image_height,
+ image_width,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `angles`: A scalar angle to rotate all images by, or (for batches of
+ images) a vector with an angle to rotate each image in the batch. The
+ rank must be statically known (the shape is not `TensorShape(None)`).
+* `image_height`: Height of the image(s) to be transformed.
+* `image_width`: Width of the image(s) to be transformed.
+
+
+#### Returns:
+
+A tensor of shape (num_images, 8). Projective transforms which can be
+given to `transform` op.
+
+
diff --git a/docs/api_docs/python/tfa/image/transform_ops/compose_transforms.md b/docs/api_docs/python/tfa/image/transform_ops/compose_transforms.md
new file mode 100644
index 0000000000..a0a79c89cc
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform_ops/compose_transforms.md
@@ -0,0 +1,52 @@
+
+
+
+
+
+# tfa.image.transform_ops.compose_transforms
+
+
+
+
+
+
+
+
+Composes the transforms tensors.
+
+``` python
+tfa.image.transform_ops.compose_transforms(
+ transforms,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `transforms`: List of image projective transforms to be composed. Each
+ transform is length 8 (single transform) or shape (N, 8) (batched
+ transforms). The shapes of all inputs must be equal, and at least one
+ input must be given.
+* `name`: The name for the op.
+
+
+#### Returns:
+
+A composed transform tensor. When passed to `transform` op,
+ equivalent to applying each of the given transforms to the image in
+ order.
+
+
diff --git a/docs/api_docs/python/tfa/image/transform_ops/flat_transforms_to_matrices.md b/docs/api_docs/python/tfa/image/transform_ops/flat_transforms_to_matrices.md
new file mode 100644
index 0000000000..d2ab0b6d8b
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform_ops/flat_transforms_to_matrices.md
@@ -0,0 +1,58 @@
+
+
+
+
+
+# tfa.image.transform_ops.flat_transforms_to_matrices
+
+
+
+
+
+
+
+
+Converts projective transforms to affine matrices.
+
+``` python
+tfa.image.transform_ops.flat_transforms_to_matrices(
+ transforms,
+ name=None
+)
+```
+
+
+
+
+
+Note that the output matrices map output coordinates to input coordinates.
+For the forward transformation matrix, call `tf.linalg.inv` on the result.
+
+#### Args:
+
+
+* `transforms`: Vector of length 8, or batches of transforms with shape
+ `(N, 8)`.
+* `name`: The name for the op.
+
+
+#### Returns:
+
+3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
+ *output coordinates* (in homogeneous coordinates) of each transform to
+ the corresponding *input coordinates*.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If `transforms` have an invalid shape.
+
diff --git a/docs/api_docs/python/tfa/image/transform_ops/matrices_to_flat_transforms.md b/docs/api_docs/python/tfa/image/transform_ops/matrices_to_flat_transforms.md
new file mode 100644
index 0000000000..5d4f3b6135
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/transform_ops/matrices_to_flat_transforms.md
@@ -0,0 +1,59 @@
+
+
+
+
+
+# tfa.image.transform_ops.matrices_to_flat_transforms
+
+
+
+
+
+
+
+
+Converts affine matrices to projective transforms.
+
+``` python
+tfa.image.transform_ops.matrices_to_flat_transforms(
+ transform_matrices,
+ name=None
+)
+```
+
+
+
+
+
+Note that we expect matrices that map output coordinates to input
+coordinates. To convert forward transformation matrices,
+call `tf.linalg.inv` on the matrices and use the result here.
+
+#### Args:
+
+
+* `transform_matrices`: One or more affine transformation matrices, for the
+ reverse transformation in homogeneous coordinates. Shape `(3, 3)` or
+ `(N, 3, 3)`.
+* `name`: The name for the op.
+
+
+#### Returns:
+
+2D tensor of flat transforms with shape `(N, 8)`, which may be passed
+into `transform` op.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If `transform_matrices` have an invalid shape.
+
diff --git a/docs/api_docs/python/tfa/image/translate.md b/docs/api_docs/python/tfa/image/translate.md
new file mode 100644
index 0000000000..53dab3e437
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/translate.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+# tfa.image.translate
+
+
+
+
+
+
+
+
+Translate image(s) by the passed vector(s).
+
+**Aliases**: `tfa.image.translate_ops.translate`
+
+``` python
+tfa.image.translate(
+ images,
+ translations,
+ interpolation='NEAREST',
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `images`: A tensor of shape
+ (num_images, num_rows, num_columns, num_channels) (NHWC),
+ (num_rows, num_columns, num_channels) (HWC), or
+ (num_rows, num_columns) (HW). The rank must be statically known (the
+ shape is not `TensorShape(None)`).
+* `translations`: A vector representing [dx, dy] or (if images has rank 4)
+ a matrix of length num_images, with a [dx, dy] vector for each image
+ in the batch.
+* `interpolation`: Interpolation mode. Supported values: "NEAREST",
+ "BILINEAR".
+* `name`: The name of the op.
+
+#### Returns:
+
+Image(s) with the same type and shape as `images`, translated by the
+given vector(s). Empty space due to the translation will be filled with
+zeros.
+
+
+#### Raises:
+
+
+* `TypeError`: If `images` is an invalid type.
+
diff --git a/docs/api_docs/python/tfa/image/translate_ops.md b/docs/api_docs/python/tfa/image/translate_ops.md
new file mode 100644
index 0000000000..e98f9f7128
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/translate_ops.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.image.translate_ops
+
+
+
+
+
+
+Image translate ops.
+
+
+
+## Functions
+
+[`transform(...)`](../../tfa/image/transform.md): Applies the given transform(s) to the image(s).
+
+[`translate(...)`](../../tfa/image/translate.md): Translate image(s) by the passed vector(s).
+
+[`translations_to_projective_transforms(...)`](../../tfa/image/translate_ops/translations_to_projective_transforms.md): Returns projective transform(s) for the given translation(s).
+
+
+
diff --git a/docs/api_docs/python/tfa/image/translate_ops/translations_to_projective_transforms.md b/docs/api_docs/python/tfa/image/translate_ops/translations_to_projective_transforms.md
new file mode 100644
index 0000000000..ba80eddf80
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/translate_ops/translations_to_projective_transforms.md
@@ -0,0 +1,50 @@
+
+
+
+
+
+# tfa.image.translate_ops.translations_to_projective_transforms
+
+
+
+
+
+
+
+
+Returns projective transform(s) for the given translation(s).
+
+``` python
+tfa.image.translate_ops.translations_to_projective_transforms(
+ translations,
+ name=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `translations`: A 2-element list representing [dx, dy] or a matrix of
+ 2-element lists representing [dx, dy] to translate for each image
+ (for a batch of images). The rank must be statically known
+ (the shape is not `TensorShape(None)`).
+* `name`: The name of the op.
+
+#### Returns:
+
+A tensor of shape (num_images, 8) projective transforms which can be
+given to tfa.image.transform.
+
+
+
diff --git a/docs/api_docs/python/tfa/image/utils.md b/docs/api_docs/python/tfa/image/utils.md
new file mode 100644
index 0000000000..547bac03b9
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/utils.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.image.utils
+
+
+
+
+
+
+Image util ops.
+
+
+
+## Functions
+
+[`from_4D_image(...)`](../../tfa/image/utils/from_4D_image.md): Convert back to an image with `ndims` rank.
+
+[`get_ndims(...)`](../../tfa/image/utils/get_ndims.md)
+
+[`to_4D_image(...)`](../../tfa/image/utils/to_4D_image.md): Convert 2/3/4D image to 4D image.
+
+
+
diff --git a/docs/api_docs/python/tfa/image/utils/from_4D_image.md b/docs/api_docs/python/tfa/image/utils/from_4D_image.md
new file mode 100644
index 0000000000..a340dcb85b
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/utils/from_4D_image.md
@@ -0,0 +1,47 @@
+
+
+
+
+
+# tfa.image.utils.from_4D_image
+
+
+
+
+
+
+
+
+Convert back to an image with `ndims` rank.
+
+``` python
+tfa.image.utils.from_4D_image(
+ image,
+ ndims
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `image`: 4D tensor.
+* `ndims`: The original rank of the image.
+
+
+#### Returns:
+
+`ndims`-D tensor with the same type.
+
+
diff --git a/docs/api_docs/python/tfa/image/utils/get_ndims.md b/docs/api_docs/python/tfa/image/utils/get_ndims.md
new file mode 100644
index 0000000000..a7995eae03
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/utils/get_ndims.md
@@ -0,0 +1,32 @@
+
+
+
+
+
+# tfa.image.utils.get_ndims
+
+
+
+
+
+
+
+
+
+
+``` python
+tfa.image.utils.get_ndims(image)
+```
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/image/utils/to_4D_image.md b/docs/api_docs/python/tfa/image/utils/to_4D_image.md
new file mode 100644
index 0000000000..dd048b3951
--- /dev/null
+++ b/docs/api_docs/python/tfa/image/utils/to_4D_image.md
@@ -0,0 +1,43 @@
+
+
+
+
+
+# tfa.image.utils.to_4D_image
+
+
+
+
+
+
+
+
+Convert 2/3/4D image to 4D image.
+
+``` python
+tfa.image.utils.to_4D_image(image)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `image`: 2/3/4D tensor.
+
+
+#### Returns:
+
+4D tensor with the same type.
+
+
diff --git a/docs/api_docs/python/tfa/layers.md b/docs/api_docs/python/tfa/layers.md
new file mode 100644
index 0000000000..bbd54a217f
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers.md
@@ -0,0 +1,59 @@
+
+
+
+
+
+# Module: tfa.layers
+
+
+
+
+
+
+Additional layers that conform to Keras API.
+
+
+
+## Modules
+
+[`gelu`](../tfa/layers/gelu.md) module: Implements GELU activation.
+
+[`maxout`](../tfa/layers/maxout.md) module: Implementing Maxout layer.
+
+[`normalizations`](../tfa/layers/normalizations.md) module
+
+[`optical_flow`](../tfa/layers/optical_flow.md) module: Tensorflow op performing correlation cost operation.
+
+[`poincare`](../tfa/layers/poincare.md) module: Implementing PoincareNormalize layer.
+
+[`sparsemax`](../tfa/layers/sparsemax.md) module
+
+[`wrappers`](../tfa/layers/wrappers.md) module
+
+## Classes
+
+[`class CorrelationCost`](../tfa/layers/CorrelationCost.md): Correlation Cost Layer.
+
+[`class GELU`](../tfa/layers/GELU.md): Gaussian Error Linear Unit.
+
+[`class GroupNormalization`](../tfa/layers/GroupNormalization.md): Group normalization layer.
+
+[`class InstanceNormalization`](../tfa/layers/InstanceNormalization.md): Instance normalization layer.
+
+[`class Maxout`](../tfa/layers/Maxout.md): Applies Maxout to the input.
+
+[`class PoincareNormalize`](../tfa/layers/PoincareNormalize.md): Project into the Poincare ball with norm <= 1.0 - epsilon.
+
+[`class Sparsemax`](../tfa/layers/Sparsemax.md): Sparsemax activation function [1].
+
+[`class WeightNormalization`](../tfa/layers/WeightNormalization.md): This wrapper reparameterizes a layer by decoupling the weight's
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/CorrelationCost.md b/docs/api_docs/python/tfa/layers/CorrelationCost.md
new file mode 100644
index 0000000000..37930169c4
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/CorrelationCost.md
@@ -0,0 +1,860 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.CorrelationCost
+
+
+
+
+
+
+
+
+## Class `CorrelationCost`
+
+Correlation Cost Layer.
+
+
+
+**Aliases**: `tfa.layers.optical_flow.CorrelationCost`
+
+
+
+This layer implements the correlation operation from FlowNet: Learning
+Optical Flow with Convolutional Networks (Fischer et al.):
+https://arxiv.org/abs/1504.06852
+
+#### Args:
+
+
+* `kernel_size`: An integer specifying the height and width of the
+ patch used to compute the per-patch costs.
+* `max_displacement`: An integer specifying the maximum search radius
+ for each position.
+* `stride_1`: An integer specifying the stride length in the input.
+* `stride_2`: An integer specifying the stride length in the patch.
+* `pad`: An integer specifying the paddings in height and width.
+* `data_format`: Specifies the data format.
+ Possible values are:
+ "channels_last" float [batch, height, width, channels]
+ "channels_first" float [batch, channels, height, width]
+ Defaults to `"channels_last"`.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ kernel_size,
+ max_displacement,
+ stride_1,
+ stride_2,
+ pad,
+ data_format,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+ for `inputs` by the previous layer (if `input` did come from
+ a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/GELU.md b/docs/api_docs/python/tfa/layers/GELU.md
new file mode 100644
index 0000000000..79e3db78dc
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/GELU.md
@@ -0,0 +1,850 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.GELU
+
+
+
+
+
+
+
+
+## Class `GELU`
+
+Gaussian Error Linear Unit.
+
+
+
+**Aliases**: `tfa.layers.gelu.GELU`
+
+
+
+A smoother version of ReLU generally used
+in BERT or BERT-architecture-based models.
+Original paper: https://arxiv.org/abs/1606.08415
+
+#### Input shape:
+
+Arbitrary. Use the keyword argument `input_shape`
+(tuple of integers, does not include the samples axis)
+when using this layer as the first layer in a model.
+
+
+
+#### Output shape:
+
+Same shape as the input.
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ approximate=True,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+ for `inputs` by the previous layer (if `input` did come from
+ a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/GroupNormalization.md b/docs/api_docs/python/tfa/layers/GroupNormalization.md
new file mode 100644
index 0000000000..93479dc6a8
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/GroupNormalization.md
@@ -0,0 +1,888 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.GroupNormalization
+
+
+
+
+
+
+
+
+## Class `GroupNormalization`
+
+Group normalization layer.
+
+
+
+**Aliases**: `tfa.layers.normalizations.GroupNormalization`
+
+
+
+Group Normalization divides the channels into groups and computes
+within each group the mean and variance for normalization.
+Empirically, its accuracy is more stable than batch norm in a wide
+range of small batch sizes, if learning rate is adjusted linearly
+with batch sizes.
+
+Relation to Layer Normalization:
+If the number of groups is set to 1, then this operation becomes identical
+to Layer Normalization.
+
+Relation to Instance Normalization:
+If the number of groups is set to the
+input dimension (number of groups is equal
+to number of channels), then this operation becomes
+identical to Instance Normalization.
+
+Arguments
+ groups: Integer, the number of groups for Group Normalization.
+ Can be in the range [1, N] where N is the input dimension.
+ The input dimension must be divisible by the number of groups.
+ axis: Integer, the axis that should be normalized.
+ epsilon: Small float added to variance to avoid dividing by zero.
+ center: If True, add offset of `beta` to normalized tensor.
+ If False, `beta` is ignored.
+ scale: If True, multiply by `gamma`.
+ If False, `gamma` is not used.
+ beta_initializer: Initializer for the beta weight.
+ gamma_initializer: Initializer for the gamma weight.
+ beta_regularizer: Optional regularizer for the beta weight.
+ gamma_regularizer: Optional regularizer for the gamma weight.
+ beta_constraint: Optional constraint for the beta weight.
+ gamma_constraint: Optional constraint for the gamma weight.
+
+Input shape
+ Arbitrary. Use the keyword argument `input_shape`
+ (tuple of integers, does not include the samples axis)
+ when using this layer as the first layer in a model.
+
+Output shape
+ Same shape as input.
+References
+ - [Group Normalization](https://arxiv.org/abs/1803.08494)
+
+__init__
+
+View source
+
+``` python
+__init__(
+ groups=2,
+ axis=-1,
+ epsilon=0.001,
+ center=True,
+ scale=True,
+ beta_initializer='zeros',
+ gamma_initializer='ones',
+ beta_regularizer=None,
+ gamma_regularizer=None,
+ beta_constraint=None,
+ gamma_constraint=None,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/InstanceNormalization.md b/docs/api_docs/python/tfa/layers/InstanceNormalization.md
new file mode 100644
index 0000000000..d626701bd3
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/InstanceNormalization.md
@@ -0,0 +1,864 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.InstanceNormalization
+
+
+
+
+
+
+
+
+## Class `InstanceNormalization`
+
+Instance normalization layer.
+
+Inherits From: [`GroupNormalization`](../../tfa/layers/GroupNormalization.md)
+
+**Aliases**: `tfa.layers.normalizations.InstanceNormalization`
+
+
+
+Instance Normalization is a specific case of ```GroupNormalization``` since
+it normalizes all features of one channel. The group size is equal to the
+channel size. Empirically, its accuracy is more stable than batch norm in a
+wide range of small batch sizes, if learning rate is adjusted linearly
+with batch sizes.
+
+Arguments
+ axis: Integer, the axis that should be normalized.
+ epsilon: Small float added to variance to avoid dividing by zero.
+ center: If True, add offset of `beta` to normalized tensor.
+ If False, `beta` is ignored.
+ scale: If True, multiply by `gamma`.
+ If False, `gamma` is not used.
+ beta_initializer: Initializer for the beta weight.
+ gamma_initializer: Initializer for the gamma weight.
+ beta_regularizer: Optional regularizer for the beta weight.
+ gamma_regularizer: Optional regularizer for the gamma weight.
+ beta_constraint: Optional constraint for the beta weight.
+ gamma_constraint: Optional constraint for the gamma weight.
+
+Input shape
+ Arbitrary. Use the keyword argument `input_shape`
+ (tuple of integers, does not include the samples axis)
+ when using this layer as the first layer in a model.
+
+Output shape
+ Same shape as input.
+
+References
+ - [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022)
+
+__init__
+
+View source
+
+``` python
+__init__(**kwargs)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/Maxout.md b/docs/api_docs/python/tfa/layers/Maxout.md
new file mode 100644
index 0000000000..bad6a42a7b
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/Maxout.md
@@ -0,0 +1,861 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.Maxout
+
+
+
+
+
+
+
+
+## Class `Maxout`
+
+Applies Maxout to the input.
+
+
+
+**Aliases**: `tfa.layers.maxout.Maxout`
+
+
+
+"Maxout Networks" Ian J. Goodfellow, David Warde-Farley, Mehdi Mirza, Aaron
+Courville, Yoshua Bengio. https://arxiv.org/abs/1302.4389
+
+Usually the operation is performed in the filter/channel dimension. This
+can also be used after Dense layers to reduce number of features.
+
+#### Arguments:
+
+
+* `num_units`: Specifies how many features will remain after maxout
+ in the `axis` dimension (usually channel).
+ This must be a factor of number of features.
+* `axis`: The dimension where max pooling will be performed. Default is the
+ last dimension.
+
+
+#### Input shape:
+
+nD tensor with shape: `(batch_size, ..., axis_dim, ...)`.
+
+
+
+#### Output shape:
+
+nD tensor with shape: `(batch_size, ..., num_units, ...)`.
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_units,
+ axis=-1,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+    of arrays and their shape must match the
+    number of dimensions of the weights
+    of the layer (i.e. it should match the
+    output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/PoincareNormalize.md b/docs/api_docs/python/tfa/layers/PoincareNormalize.md
new file mode 100644
index 0000000000..5bbba69d51
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/PoincareNormalize.md
@@ -0,0 +1,855 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.PoincareNormalize
+
+
+
+
+
+
+
+
+## Class `PoincareNormalize`
+
+Project into the Poincare ball with norm <= 1.0 - epsilon.
+
+
+
+**Aliases**: `tfa.layers.poincare.PoincareNormalize`
+
+
+
+https://en.wikipedia.org/wiki/Poincare_ball_model
+
+Used in Poincare Embeddings for Learning Hierarchical Representations
+Maximilian Nickel, Douwe Kiela https://arxiv.org/pdf/1705.08039.pdf
+
+For a 1-D tensor with `axis = 0`, computes
+
+ (x * (1 - epsilon)) / ||x|| if ||x|| > 1 - epsilon
+ output =
+ x otherwise
+
+For `x` with more dimensions, independently normalizes each 1-D slice along
+dimension `axis`.
+
+#### Arguments:
+
+
+* `axis`: Axis along which to normalize. A scalar or a vector of integers.
+* `epsilon`: A small deviation from the edge of the unit sphere for
+ numerical stability.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ axis=1,
+ epsilon=1e-05,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layer.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+  layer.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layer.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+    of arrays and their shape must match the
+    number of dimensions of the weights
+    of the layer (i.e. it should match the
+    output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/Sparsemax.md b/docs/api_docs/python/tfa/layers/Sparsemax.md
new file mode 100644
index 0000000000..e4a75e4b69
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/Sparsemax.md
@@ -0,0 +1,842 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.Sparsemax
+
+
+
+
+
+
+
+
+## Class `Sparsemax`
+
+Sparsemax activation function [1].
+
+
+
+**Aliases**: `tfa.layers.sparsemax.Sparsemax`
+
+
+
+The output shape is the same as the input shape.
+
+[1]: https://arxiv.org/abs/1602.02068
+
+#### Arguments:
+
+
+* `axis`: Integer, axis along which the sparsemax normalization is applied.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ axis=-1,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layer.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+  layer.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layer.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+    of arrays and their shape must match the
+    number of dimensions of the weights
+    of the layer (i.e. it should match the
+    output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/WeightNormalization.md b/docs/api_docs/python/tfa/layers/WeightNormalization.md
new file mode 100644
index 0000000000..32a97ebe58
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/WeightNormalization.md
@@ -0,0 +1,867 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.layers.WeightNormalization
+
+
+
+
+
+
+
+
+## Class `WeightNormalization`
+
+This wrapper reparameterizes a layer by decoupling the weight's
+magnitude and direction.
+
+
+
+**Aliases**: `tfa.layers.wrappers.WeightNormalization`
+
+
+
+This speeds up convergence by improving the
+conditioning of the optimization problem.
+Weight Normalization: A Simple Reparameterization to Accelerate
+Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
+Tim Salimans, Diederik P. Kingma (2016)
+WeightNormalization wrapper works for keras and tf layers.
+```python
+ net = WeightNormalization(
+ tf.keras.layers.Conv2D(2, 2, activation='relu'),
+ input_shape=(32, 32, 3),
+ data_init=True)(x)
+ net = WeightNormalization(
+ tf.keras.layers.Conv2D(16, 5, activation='relu'),
+ data_init=True)(net)
+ net = WeightNormalization(
+ tf.keras.layers.Dense(120, activation='relu'),
+ data_init=True)(net)
+ net = WeightNormalization(
+ tf.keras.layers.Dense(n_classes),
+ data_init=True)(net)
+```
+Arguments:
+ layer: a layer instance.
+ data_init: If `True` use data dependent variable initialization
+Raises:
+ ValueError: If not initialized with a `Layer` instance.
+ ValueError: If `Layer` does not contain a `kernel` of weights
+ NotImplementedError: If `data_init` is True and running graph execution
+
+__init__
+
+View source
+
+``` python
+__init__(
+ layer,
+ data_init=True,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Build `Layer`
+
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+View source
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+remove
+
+View source
+
+``` python
+remove()
+```
+
+
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/gelu.md b/docs/api_docs/python/tfa/layers/gelu.md
new file mode 100644
index 0000000000..b2f1536f0e
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/gelu.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.layers.gelu
+
+
+
+
+
+
+Implements GELU activation.
+
+
+
+## Classes
+
+[`class GELU`](../../tfa/layers/GELU.md): Gaussian Error Linear Unit.
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/maxout.md b/docs/api_docs/python/tfa/layers/maxout.md
new file mode 100644
index 0000000000..7181d9778d
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/maxout.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.layers.maxout
+
+
+
+
+
+
+Implementing Maxout layer.
+
+
+
+## Classes
+
+[`class Maxout`](../../tfa/layers/Maxout.md): Applies Maxout to the input.
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/normalizations.md b/docs/api_docs/python/tfa/layers/normalizations.md
new file mode 100644
index 0000000000..84cdb141a5
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/normalizations.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.layers.normalizations
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class GroupNormalization`](../../tfa/layers/GroupNormalization.md): Group normalization layer.
+
+[`class InstanceNormalization`](../../tfa/layers/InstanceNormalization.md): Instance normalization layer.
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/optical_flow.md b/docs/api_docs/python/tfa/layers/optical_flow.md
new file mode 100644
index 0000000000..2780682a99
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/optical_flow.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.layers.optical_flow
+
+
+
+
+
+
+Tensorflow op performing correlation cost operation.
+
+
+
+## Classes
+
+[`class CorrelationCost`](../../tfa/layers/CorrelationCost.md): Correlation Cost Layer.
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/poincare.md b/docs/api_docs/python/tfa/layers/poincare.md
new file mode 100644
index 0000000000..7b8babebcb
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/poincare.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.layers.poincare
+
+
+
+
+
+
+Implementing PoincareNormalize layer.
+
+
+
+## Classes
+
+[`class PoincareNormalize`](../../tfa/layers/PoincareNormalize.md): Project into the Poincare ball with norm <= 1.0 - epsilon.
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/sparsemax.md b/docs/api_docs/python/tfa/layers/sparsemax.md
new file mode 100644
index 0000000000..23ee7289ff
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/sparsemax.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.layers.sparsemax
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class Sparsemax`](../../tfa/layers/Sparsemax.md): Sparsemax activation function [1].
+
+## Functions
+
+[`sparsemax(...)`](../../tfa/activations/sparsemax.md): Sparsemax activation function [1].
+
+
+
diff --git a/docs/api_docs/python/tfa/layers/wrappers.md b/docs/api_docs/python/tfa/layers/wrappers.md
new file mode 100644
index 0000000000..1d4a7d7bfd
--- /dev/null
+++ b/docs/api_docs/python/tfa/layers/wrappers.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.layers.wrappers
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class WeightNormalization`](../../tfa/layers/WeightNormalization.md): This wrapper reparameterizes a layer by decoupling the weight's magnitude and direction.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses.md b/docs/api_docs/python/tfa/losses.md
new file mode 100644
index 0000000000..0c5a6f6a20
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses.md
@@ -0,0 +1,75 @@
+
+
+
+
+
+# Module: tfa.losses
+
+
+
+
+
+
+Additional losses that conform to Keras API.
+
+
+
+## Modules
+
+[`contrastive`](../tfa/losses/contrastive.md) module: Implements contrastive loss.
+
+[`focal_loss`](../tfa/losses/focal_loss.md) module: Implements Focal loss.
+
+[`lifted`](../tfa/losses/lifted.md) module: Implements lifted_struct_loss.
+
+[`metric_learning`](../tfa/losses/metric_learning.md) module: Functions of metric learning.
+
+[`npairs`](../tfa/losses/npairs.md) module: Implements npairs loss.
+
+[`triplet`](../tfa/losses/triplet.md) module: Implements triplet loss.
+
+## Classes
+
+[`class ContrastiveLoss`](../tfa/losses/ContrastiveLoss.md): Computes the contrastive loss between `y_true` and `y_pred`.
+
+[`class GIoULoss`](../tfa/losses/GIoULoss.md): Implements the GIoU loss function.
+
+[`class LiftedStructLoss`](../tfa/losses/LiftedStructLoss.md): Computes the lifted structured loss.
+
+[`class NpairsLoss`](../tfa/losses/NpairsLoss.md): Computes the npairs loss between `y_true` and `y_pred`.
+
+[`class NpairsMultilabelLoss`](../tfa/losses/NpairsMultilabelLoss.md): Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+[`class SigmoidFocalCrossEntropy`](../tfa/losses/SigmoidFocalCrossEntropy.md): Implements the focal loss function.
+
+[`class SparsemaxLoss`](../tfa/losses/SparsemaxLoss.md): Sparsemax loss function.
+
+[`class TripletSemiHardLoss`](../tfa/losses/TripletSemiHardLoss.md): Computes the triplet loss with semi-hard negative mining.
+
+## Functions
+
+[`contrastive_loss(...)`](../tfa/losses/contrastive_loss.md): Computes the contrastive loss between `y_true` and `y_pred`.
+
+[`giou_loss(...)`](../tfa/losses/giou_loss.md): Implements the GIoU loss function.
+
+[`lifted_struct_loss(...)`](../tfa/losses/lifted_struct_loss.md): Computes the lifted structured loss.
+
+[`npairs_loss(...)`](../tfa/losses/npairs_loss.md): Computes the npairs loss between `y_true` and `y_pred`.
+
+[`npairs_multilabel_loss(...)`](../tfa/losses/npairs_multilabel_loss.md): Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+[`sigmoid_focal_crossentropy(...)`](../tfa/losses/sigmoid_focal_crossentropy.md): Implements the focal loss function.
+
+[`sparsemax_loss(...)`](../tfa/losses/sparsemax_loss.md): Sparsemax loss function [1].
+
+[`triplet_semihard_loss(...)`](../tfa/losses/triplet_semihard_loss.md): Computes the triplet loss with semi-hard negative mining.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/ContrastiveLoss.md b/docs/api_docs/python/tfa/losses/ContrastiveLoss.md
new file mode 100644
index 0000000000..502b3b3589
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/ContrastiveLoss.md
@@ -0,0 +1,163 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.ContrastiveLoss
+
+
+
+
+
+
+
+
+## Class `ContrastiveLoss`
+
+Computes the contrastive loss between `y_true` and `y_pred`.
+
+
+
+**Aliases**: `tfa.losses.contrastive.ContrastiveLoss`
+
+
+
+This loss encourages the embedding to be close to each other for
+the samples of the same label and the embedding to be far apart at least
+by the margin constant for the samples of different labels.
+
+See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
+
+We expect labels `y_true` to be provided as 1-D integer `Tensor`
+with shape [batch_size] of binary integer labels. And `y_pred` must be
+1-D float `Tensor` with shape [batch_size] of distances between two
+embedding matrices.
+
+The euclidean distances `y_pred` between two embedding matrices
+`a` and `b` with shape [batch_size, hidden_size] can be computed
+as follows:
+
+```python
+# y_pred = \sqrt (\sum_i (a[:, i] - b[:, i])^2)
+y_pred = tf.linalg.norm(a - b, axis=1)
+```
+
+#### Args:
+
+
+* `margin`: `Float`, margin term in the loss definition.
+ Default value is 1.0.
+* `reduction`: (Optional) Type of `tf.keras.losses.Reduction` to apply.
+ Default value is `SUM_OVER_BATCH_SIZE`.
+* `name`: (Optional) name for the loss.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ margin=1.0,
+ reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
+ name='contrasitve_loss'
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+  by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/GIoULoss.md b/docs/api_docs/python/tfa/losses/GIoULoss.md
new file mode 100644
index 0000000000..cf135f595c
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/GIoULoss.md
@@ -0,0 +1,161 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.GIoULoss
+
+
+
+
+
+
+
+
+## Class `GIoULoss`
+
+Implements the GIoU loss function.
+
+
+
+
+
+GIoU loss was first introduced in the
+[Generalized Intersection over Union: A Metric and A Loss for Bounding Box
+Regression](https://giou.stanford.edu/GIoU.pdf).
+GIoU is an enhancement for models which use IoU in object detection.
+
+#### Usage:
+
+
+
+```python
+gl = tfa.losses.GIoULoss()
+boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
+boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
+loss = gl(boxes1, boxes2)
+print('Loss: ', loss.numpy()) # Loss: [1.07500000298023224, 1.9333333373069763]
+```
+Usage with tf.keras API:
+
+```python
+model = tf.keras.Model(inputs, outputs)
+model.compile('sgd', loss=tfa.losses.GIoULoss())
+```
+
+#### Args:
+
+
+* `mode`: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ mode='giou',
+ reduction=tf.keras.losses.Reduction.AUTO,
+ name='giou_loss'
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+  by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/LiftedStructLoss.md b/docs/api_docs/python/tfa/losses/LiftedStructLoss.md
new file mode 100644
index 0000000000..404392bbe7
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/LiftedStructLoss.md
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.LiftedStructLoss
+
+
+
+
+
+
+
+
+## Class `LiftedStructLoss`
+
+Computes the lifted structured loss.
+
+
+
+**Aliases**: `tfa.losses.lifted.LiftedStructLoss`
+
+
+
+The loss encourages the positive distances (between a pair of embeddings
+with the same labels) to be smaller than any negative distances (between
+a pair of embeddings with different labels) in the mini-batch in a way
+that is differentiable with respect to the embedding vectors.
+See: https://arxiv.org/abs/1511.06452.
+
+#### Args:
+
+
+* `margin`: Float, margin term in the loss definition.
+* `name`: Optional name for the op.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ margin=1.0,
+ name=None,
+ **kwargs
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+  by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/NpairsLoss.md b/docs/api_docs/python/tfa/losses/NpairsLoss.md
new file mode 100644
index 0000000000..1d7310013d
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/NpairsLoss.md
@@ -0,0 +1,148 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.NpairsLoss
+
+
+
+
+
+
+
+
+## Class `NpairsLoss`
+
+Computes the npairs loss between `y_true` and `y_pred`.
+
+
+
+**Aliases**: `tfa.losses.npairs.NpairsLoss`
+
+
+
+Npairs loss expects paired data where a pair is composed of samples from
+the same labels and each pair in the minibatch has different labels.
+The loss takes each row of the pair-wise similarity matrix, `y_pred`,
+as logits and the remapped multi-class labels, `y_true`, as labels.
+
+The similarity matrix `y_pred` between two embedding matrices `a` and `b`
+with shape `[batch_size, hidden_size]` can be computed as follows:
+
+```python
+# y_pred = a * b^T
+y_pred = tf.matmul(a, b, transpose_a=False, transpose_b=True)
+```
+
+See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
+
+#### Args:
+
+
+* `name`: (Optional) name for the loss.
+
+__init__
+
+View source
+
+``` python
+__init__(name='npairs_loss')
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+  by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/NpairsMultilabelLoss.md b/docs/api_docs/python/tfa/losses/NpairsMultilabelLoss.md
new file mode 100644
index 0000000000..05f828ac74
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/NpairsMultilabelLoss.md
@@ -0,0 +1,162 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.NpairsMultilabelLoss
+
+
+
+
+
+
+
+
+## Class `NpairsMultilabelLoss`
+
+Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+
+
+**Aliases**: `tfa.losses.npairs.NpairsMultilabelLoss`
+
+
+
+Npairs loss expects paired data where a pair is composed of samples from
+the same labels and each pair in the minibatch has different labels.
+The loss takes each row of the pair-wise similarity matrix, `y_pred`,
+as logits and the remapped multi-class labels, `y_true`, as labels.
+
+To deal with multilabel inputs, the count of label intersection
+is computed as follows:
+
+```
+L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
+```
+
+Each row of the count based label matrix is further normalized so that
+each row sums to one.
+
+`y_true` should be a binary indicator for classes.
+That is, if `y_true[i, j] = 1`, then `i`th sample is in `j`th class;
+if `y_true[i, j] = 0`, then `i`th sample is not in `j`th class.
+
+The similarity matrix `y_pred` between two embedding matrices `a` and `b`
+with shape `[batch_size, hidden_size]` can be computed as follows:
+
+```python
+# y_pred = a * b^T
+y_pred = tf.matmul(a, b, transpose_a=False, transpose_b=True)
+```
+
+See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
+
+#### Args:
+
+
+* `name`: (Optional) name for the loss.
+
+__init__
+
+View source
+
+``` python
+__init__(name='npairs_multilabel_loss')
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+ by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy.md b/docs/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy.md
new file mode 100644
index 0000000000..663fcb4cf3
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy.md
@@ -0,0 +1,182 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.SigmoidFocalCrossEntropy
+
+
+
+
+
+
+
+
+## Class `SigmoidFocalCrossEntropy`
+
+Implements the focal loss function.
+
+
+
+**Aliases**: `tfa.losses.focal_loss.SigmoidFocalCrossEntropy`
+
+
+
+Focal loss was first introduced in the RetinaNet paper
+(https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
+classification when you have highly imbalanced classes. It down-weights
+well-classified examples and focuses on hard examples. The loss value is
+much higher for a sample which is misclassified by the classifier as compared
+to the loss value corresponding to a well-classified example. One of the
+best use-cases of focal loss is its usage in object detection where the
+imbalance between the background class and other classes is extremely high.
+
+#### Usage:
+
+
+
+```python
+fl = tfa.losses.SigmoidFocalCrossEntropy()
+loss = fl(
+  [[1.0], [1.0], [0.0]],
+  [[0.97], [0.91], [0.03]])
+print('Loss: ', loss.numpy())  # Loss: [0.00010971,
+                               #        0.0032975,
+                               #        0.00030611]
+```
+Usage with tf.keras API:
+
+```python
+model = tf.keras.Model(inputs, outputs)
+model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
+```
+
+#### Args:
+ alpha: balancing factor, default value is 0.25
+ gamma: modulating factor, default value is 2.0
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
+ shape as `y_true`; otherwise, it is scalar.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid or value of
+ `gamma` is less than zero
+
+__init__
+
+View source
+
+``` python
+__init__(
+ from_logits=False,
+ alpha=0.25,
+ gamma=2.0,
+ reduction=tf.keras.losses.Reduction.NONE,
+ name='sigmoid_focal_crossentropy'
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+ by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/SparsemaxLoss.md b/docs/api_docs/python/tfa/losses/SparsemaxLoss.md
new file mode 100644
index 0000000000..2ff2974261
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/SparsemaxLoss.md
@@ -0,0 +1,150 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.SparsemaxLoss
+
+
+
+
+
+
+
+
+## Class `SparsemaxLoss`
+
+Sparsemax loss function.
+
+
+
+
+
+Computes the generalized multi-label classification loss for the sparsemax
+function.
+
+Because the sparsemax loss function needs both the probability output and
+the logits to compute the loss value, `from_logits` must be `True`.
+
+Because it computes the generalized multi-label loss, the shape of both
+`y_pred` and `y_true` must be `[batch_size, num_classes]`.
+
+#### Args:
+
+
+* `from_logits`: Whether `y_pred` is expected to be a logits tensor. Default
+ is `True`, meaning `y_pred` is the logits.
+* `reduction`: (Optional) Type of `tf.keras.losses.Reduction` to apply to
+ loss. Default value is `SUM_OVER_BATCH_SIZE`.
+* `name`: Optional name for the op.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ from_logits=True,
+ reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
+ name='sparsemax_loss'
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+ by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/TripletSemiHardLoss.md b/docs/api_docs/python/tfa/losses/TripletSemiHardLoss.md
new file mode 100644
index 0000000000..2f892e5d9a
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/TripletSemiHardLoss.md
@@ -0,0 +1,151 @@
+
+
+
+
+
+
+
+
+
+# tfa.losses.TripletSemiHardLoss
+
+
+
+
+
+
+
+
+## Class `TripletSemiHardLoss`
+
+Computes the triplet loss with semi-hard negative mining.
+
+
+
+**Aliases**: `tfa.losses.triplet.TripletSemiHardLoss`
+
+
+
+The loss encourages the positive distances (between a pair of embeddings
+with the same labels) to be smaller than the minimum negative distance
+among which are at least greater than the positive distance plus the
+margin constant (called semi-hard negative) in the mini-batch.
+If no such negative exists, uses the largest negative distance instead.
+See: https://arxiv.org/abs/1503.03832.
+
+We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
+[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
+2-D float `Tensor` of l2 normalized embedding vectors.
+
+#### Args:
+
+
+* `margin`: Float, margin term in the loss definition. Default value is 1.0.
+* `name`: Optional name for the op.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ margin=1.0,
+ name=None,
+ **kwargs
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Invokes the `Loss` instance.
+
+
+#### Args:
+
+
+* `y_true`: Ground truth values. shape = `[batch_size, d0, .. dN]`
+* `y_pred`: The predicted values. shape = `[batch_size, d0, .. dN]`
+* `sample_weight`: Optional `sample_weight` acts as a
+ coefficient for the loss. If a scalar is provided, then the loss is
+ simply scaled by the given value. If `sample_weight` is a tensor of size
+ `[batch_size]`, then the total loss for each sample of the batch is
+ rescaled by the corresponding element in the `sample_weight` vector. If
+ the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+ broadcasted to this shape), then each loss element of `y_pred` is scaled
+ by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+ functions reduce by 1 dimension, usually axis=-1.)
+
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+ shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+ because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the shape of `sample_weight` is invalid.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/contrastive.md b/docs/api_docs/python/tfa/losses/contrastive.md
new file mode 100644
index 0000000000..6c126a9576
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/contrastive.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.losses.contrastive
+
+
+
+
+
+
+Implements contrastive loss.
+
+
+
+## Classes
+
+[`class ContrastiveLoss`](../../tfa/losses/ContrastiveLoss.md): Computes the contrastive loss between `y_true` and `y_pred`.
+
+## Functions
+
+[`contrastive_loss(...)`](../../tfa/losses/contrastive_loss.md): Computes the contrastive loss between `y_true` and `y_pred`.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/contrastive_loss.md b/docs/api_docs/python/tfa/losses/contrastive_loss.md
new file mode 100644
index 0000000000..825a59b325
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/contrastive_loss.md
@@ -0,0 +1,67 @@
+
+
+
+
+
+# tfa.losses.contrastive_loss
+
+
+
+
+
+
+
+
+Computes the contrastive loss between `y_true` and `y_pred`.
+
+**Aliases**: `tfa.losses.contrastive.contrastive_loss`
+
+``` python
+tfa.losses.contrastive_loss(
+ y_true,
+ y_pred,
+ margin=1.0
+)
+```
+
+
+
+
+
+This loss encourages the embedding to be close to each other for
+the samples of the same label and the embedding to be far apart at least
+by the margin constant for the samples of different labels.
+
+The euclidean distances `y_pred` between two embedding matrices
+`a` and `b` with shape [batch_size, hidden_size] can be computed
+as follows:
+
+```python
+# y_pred = \sqrt (\sum_i (a[:, i] - b[:, i])^2)
+y_pred = tf.linalg.norm(a - b, axis=1)
+```
+
+See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
+
+#### Args:
+
+
+* `y_true`: 1-D integer `Tensor` with shape [batch_size] of
+ binary labels indicating positive vs negative pair.
+* `y_pred`: 1-D float `Tensor` with shape [batch_size] of
+ distances between two embedding matrices.
+* `margin`: margin term in the loss definition.
+
+
+#### Returns:
+
+
+* `contrastive_loss`: 1-D float `Tensor` with shape [batch_size].
+
diff --git a/docs/api_docs/python/tfa/losses/focal_loss.md b/docs/api_docs/python/tfa/losses/focal_loss.md
new file mode 100644
index 0000000000..b7cb0c39dc
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/focal_loss.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.losses.focal_loss
+
+
+
+
+
+
+Implements Focal loss.
+
+
+
+## Classes
+
+[`class SigmoidFocalCrossEntropy`](../../tfa/losses/SigmoidFocalCrossEntropy.md): Implements the focal loss function.
+
+## Functions
+
+[`sigmoid_focal_crossentropy(...)`](../../tfa/losses/sigmoid_focal_crossentropy.md): Computes the sigmoid focal crossentropy loss.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/giou_loss.md b/docs/api_docs/python/tfa/losses/giou_loss.md
new file mode 100644
index 0000000000..f68194c88c
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/giou_loss.md
@@ -0,0 +1,45 @@
+
+
+
+
+
+# tfa.losses.giou_loss
+
+
+
+
+
+
+
+
+Computes the GIoU loss.
+
+``` python
+tfa.losses.giou_loss(
+ y_true,
+ y_pred,
+ mode='giou'
+)
+```
+
+
+
+#### Args:
+ y_true: true targets tensor. The coordinates of the each bounding
+ box in boxes are encoded as [y_min, x_min, y_max, x_max].
+ y_pred: predictions tensor. The coordinates of the each bounding
+ box in boxes are encoded as [y_min, x_min, y_max, x_max].
+ mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
+
+#### Returns:
+
+GIoU loss float `Tensor`.
+
+
diff --git a/docs/api_docs/python/tfa/losses/lifted.md b/docs/api_docs/python/tfa/losses/lifted.md
new file mode 100644
index 0000000000..049410cddb
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/lifted.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.losses.lifted
+
+
+
+
+
+
+Implements lifted_struct_loss.
+
+
+
+## Classes
+
+[`class LiftedStructLoss`](../../tfa/losses/LiftedStructLoss.md): Computes the lifted structured loss.
+
+## Functions
+
+[`lifted_struct_loss(...)`](../../tfa/losses/lifted_struct_loss.md): Computes the lifted structured loss.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/lifted_struct_loss.md b/docs/api_docs/python/tfa/losses/lifted_struct_loss.md
new file mode 100644
index 0000000000..e3bce63ebe
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/lifted_struct_loss.md
@@ -0,0 +1,53 @@
+
+
+
+
+
+# tfa.losses.lifted_struct_loss
+
+
+
+
+
+
+
+
+Computes the lifted structured loss.
+
+**Aliases**: `tfa.losses.lifted.lifted_struct_loss`
+
+``` python
+tfa.losses.lifted_struct_loss(
+ labels,
+ embeddings,
+ margin=1.0
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `labels`: 1-D tf.int32 `Tensor` with shape [batch_size] of
+ multiclass integer labels.
+* `embeddings`: 2-D float `Tensor` of embedding vectors. Embeddings should
+ not be l2 normalized.
+* `margin`: Float, margin term in the loss definition.
+
+
+#### Returns:
+
+
+* `lifted_loss`: tf.float32 scalar.
+
diff --git a/docs/api_docs/python/tfa/losses/metric_learning.md b/docs/api_docs/python/tfa/losses/metric_learning.md
new file mode 100644
index 0000000000..5b31433222
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/metric_learning.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.losses.metric_learning
+
+
+
+
+
+
+Functions of metric learning.
+
+
+
+## Functions
+
+[`pairwise_distance(...)`](../../tfa/losses/metric_learning/pairwise_distance.md): Computes the pairwise distance matrix with numerical stability.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/metric_learning/pairwise_distance.md b/docs/api_docs/python/tfa/losses/metric_learning/pairwise_distance.md
new file mode 100644
index 0000000000..b14ff62556
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/metric_learning/pairwise_distance.md
@@ -0,0 +1,48 @@
+
+
+
+
+
+# tfa.losses.metric_learning.pairwise_distance
+
+
+
+
+
+
+
+
+Computes the pairwise distance matrix with numerical stability.
+
+``` python
+tfa.losses.metric_learning.pairwise_distance(
+ feature,
+ squared=False
+)
+```
+
+
+
+
+
+output[i, j] = || feature[i, :] - feature[j, :] ||_2
+
+#### Args:
+
+
+* `feature`: 2-D Tensor of size [number of data, feature dimension].
+* `squared`: Boolean, whether or not to square the pairwise distances.
+
+
+#### Returns:
+
+
+* `pairwise_distances`: 2-D Tensor of size [number of data, number of data].
+
diff --git a/docs/api_docs/python/tfa/losses/npairs.md b/docs/api_docs/python/tfa/losses/npairs.md
new file mode 100644
index 0000000000..832c845cb5
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/npairs.md
@@ -0,0 +1,37 @@
+
+
+
+
+
+# Module: tfa.losses.npairs
+
+
+
+
+
+
+Implements npairs loss.
+
+
+
+## Classes
+
+[`class NpairsLoss`](../../tfa/losses/NpairsLoss.md): Computes the npairs loss between `y_true` and `y_pred`.
+
+[`class NpairsMultilabelLoss`](../../tfa/losses/NpairsMultilabelLoss.md): Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+## Functions
+
+[`npairs_loss(...)`](../../tfa/losses/npairs_loss.md): Computes the npairs loss between `y_true` and `y_pred`.
+
+[`npairs_multilabel_loss(...)`](../../tfa/losses/npairs_multilabel_loss.md): Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/npairs_loss.md b/docs/api_docs/python/tfa/losses/npairs_loss.md
new file mode 100644
index 0000000000..702b9cedc2
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/npairs_loss.md
@@ -0,0 +1,65 @@
+
+
+
+
+
+# tfa.losses.npairs_loss
+
+
+
+
+
+
+
+
+Computes the npairs loss between `y_true` and `y_pred`.
+
+**Aliases**: `tfa.losses.npairs.npairs_loss`
+
+``` python
+tfa.losses.npairs_loss(
+ y_true,
+ y_pred
+)
+```
+
+
+
+
+
+Npairs loss expects paired data where a pair is composed of samples from
+the same labels and each pair in the minibatch has different labels.
+The loss takes each row of the pair-wise similarity matrix, `y_pred`,
+as logits and the remapped multi-class labels, `y_true`, as labels.
+
+The similarity matrix `y_pred` between two embedding matrices `a` and `b`
+with shape `[batch_size, hidden_size]` can be computed as follows:
+
+```python
+# y_pred = a * b^T
+y_pred = tf.matmul(a, b, transpose_a=False, transpose_b=True)
+```
+
+See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
+
+#### Args:
+
+
+* `y_true`: 1-D integer `Tensor` with shape `[batch_size]` of
+ multi-class labels.
+* `y_pred`: 2-D float `Tensor` with shape `[batch_size, batch_size]` of
+ similarity matrix between embedding matrices.
+
+
+#### Returns:
+
+
+* `npairs_loss`: float scalar.
+
diff --git a/docs/api_docs/python/tfa/losses/npairs_multilabel_loss.md b/docs/api_docs/python/tfa/losses/npairs_multilabel_loss.md
new file mode 100644
index 0000000000..579d35a6a8
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/npairs_multilabel_loss.md
@@ -0,0 +1,82 @@
+
+
+
+
+
+# tfa.losses.npairs_multilabel_loss
+
+
+
+
+
+
+
+
+Computes the npairs loss between multilabel data `y_true` and `y_pred`.
+
+**Aliases**: `tfa.losses.npairs.npairs_multilabel_loss`
+
+``` python
+tfa.losses.npairs_multilabel_loss(
+ y_true,
+ y_pred
+)
+```
+
+
+
+
+
+Npairs loss expects paired data where a pair is composed of samples from
+the same labels and each pair in the minibatch has different labels.
+The loss takes each row of the pair-wise similarity matrix, `y_pred`,
+as logits and the remapped multi-class labels, `y_true`, as labels.
+
+To deal with multilabel inputs, the count of label intersection
+is computed as follows:
+
+```
+L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
+```
+
+Each row of the count based label matrix is further normalized so that
+each row sums to one.
+
+`y_true` should be a binary indicator for classes.
+That is, if `y_true[i, j] = 1`, then `i`th sample is in `j`th class;
+if `y_true[i, j] = 0`, then `i`th sample is not in `j`th class.
+
+The similarity matrix `y_pred` between two embedding matrices `a` and `b`
+with shape `[batch_size, hidden_size]` can be computed as follows:
+
+```python
+# y_pred = a * b^T
+y_pred = tf.matmul(a, b, transpose_a=False, transpose_b=True)
+```
+
+See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
+
+#### Args:
+
+
+* `y_true`: Either 2-D integer `Tensor` with shape
+ `[batch_size, num_classes]`, or `SparseTensor` with dense shape
+ `[batch_size, num_classes]`. If `y_true` is a `SparseTensor`, then
+ it will be converted to `Tensor` via `tf.sparse.to_dense` first.
+
+* `y_pred`: 2-D float `Tensor` with shape `[batch_size, batch_size]` of
+ similarity matrix between embedding matrices.
+
+
+#### Returns:
+
+
+* `npairs_multilabel_loss`: float scalar.
+
diff --git a/docs/api_docs/python/tfa/losses/sigmoid_focal_crossentropy.md b/docs/api_docs/python/tfa/losses/sigmoid_focal_crossentropy.md
new file mode 100644
index 0000000000..961aaaf0bc
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/sigmoid_focal_crossentropy.md
@@ -0,0 +1,49 @@
+
+
+
+
+
+# tfa.losses.sigmoid_focal_crossentropy
+
+
+
+
+
+
+
+
+Computes the sigmoid focal crossentropy loss.
+
+**Aliases**: `tfa.losses.focal_loss.sigmoid_focal_crossentropy`
+
+``` python
+tfa.losses.sigmoid_focal_crossentropy(
+ y_true,
+ y_pred,
+ alpha=0.25,
+ gamma=2.0,
+ from_logits=False
+)
+```
+
+
+
+#### Args:
+ y_true: true targets tensor.
+ y_pred: predictions tensor.
+ alpha: balancing factor.
+ gamma: modulating factor.
+
+#### Returns:
+
+Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the
+same shape as `y_true`; otherwise, it is scalar.
+
+
diff --git a/docs/api_docs/python/tfa/losses/sparsemax_loss.md b/docs/api_docs/python/tfa/losses/sparsemax_loss.md
new file mode 100644
index 0000000000..5e1d34af90
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/sparsemax_loss.md
@@ -0,0 +1,58 @@
+
+
+
+
+
+# tfa.losses.sparsemax_loss
+
+
+
+
+
+
+
+
+Sparsemax loss function [1].
+
+``` python
+tfa.losses.sparsemax_loss(
+ logits,
+ sparsemax,
+ labels,
+ name=None
+)
+```
+
+
+
+
+
+Computes the generalized multi-label classification loss for the sparsemax
+function. The implementation is a reformulation of the original loss
+function such that it uses the sparsemax probability output instead of the
+internal τ (tau) variable. However, the output is identical to the original
+loss function.
+
+[1]: https://arxiv.org/abs/1602.02068
+
+#### Args:
+
+
+* `logits`: A `Tensor`. Must be one of the following types: `float32`,
+ `float64`.
+* `sparsemax`: A `Tensor`. Must have the same type as `logits`.
+* `labels`: A `Tensor`. Must have the same type as `logits`.
+* `name`: A name for the operation (optional).
+
+#### Returns:
+
+A `Tensor`. Has the same type as `logits`.
+
+
diff --git a/docs/api_docs/python/tfa/losses/triplet.md b/docs/api_docs/python/tfa/losses/triplet.md
new file mode 100644
index 0000000000..27f5313dfc
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/triplet.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.losses.triplet
+
+
+
+
+
+
+Implements triplet loss.
+
+
+
+## Classes
+
+[`class TripletSemiHardLoss`](../../tfa/losses/TripletSemiHardLoss.md): Computes the triplet loss with semi-hard negative mining.
+
+## Functions
+
+[`triplet_semihard_loss(...)`](../../tfa/losses/triplet_semihard_loss.md): Computes the triplet loss with semi-hard negative mining.
+
+
+
diff --git a/docs/api_docs/python/tfa/losses/triplet_semihard_loss.md b/docs/api_docs/python/tfa/losses/triplet_semihard_loss.md
new file mode 100644
index 0000000000..e7caeb0336
--- /dev/null
+++ b/docs/api_docs/python/tfa/losses/triplet_semihard_loss.md
@@ -0,0 +1,47 @@
+
+
+
+
+
+# tfa.losses.triplet_semihard_loss
+
+
+
+
+
+
+
+
+Computes the triplet loss with semi-hard negative mining.
+
+**Aliases**: `tfa.losses.triplet.triplet_semihard_loss`
+
+``` python
+tfa.losses.triplet_semihard_loss(
+ y_true,
+ y_pred,
+ margin=1.0
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `y_true`: 1-D integer `Tensor` with shape [batch_size] of
+ multiclass integer labels.
+* `y_pred`: 2-D float `Tensor` of embedding vectors. Embeddings should
+ be l2 normalized.
+* `margin`: Float, margin term in the loss definition.
+
diff --git a/docs/api_docs/python/tfa/metrics.md b/docs/api_docs/python/tfa/metrics.md
new file mode 100644
index 0000000000..6a51c2b3f2
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics.md
@@ -0,0 +1,63 @@
+
+
+
+
+
+# Module: tfa.metrics
+
+
+
+
+
+
+Additional metrics that conform to Keras API.
+
+
+
+## Modules
+
+[`cohens_kappa`](../tfa/metrics/cohens_kappa.md) module: Implements Cohen's Kappa.
+
+[`f_scores`](../tfa/metrics/f_scores.md) module: Implements F scores.
+
+[`hamming`](../tfa/metrics/hamming.md) module: Implements Hamming distance and loss.
+
+[`matthews_correlation_coefficient`](../tfa/metrics/matthews_correlation_coefficient.md) module: Matthews Correlation Coefficient Implementation.
+
+[`multilabel_confusion_matrix`](../tfa/metrics/multilabel_confusion_matrix.md) module: Implements Multi-label confusion matrix scores.
+
+[`r_square`](../tfa/metrics/r_square.md) module: Implements R^2 scores.
+
+[`utils`](../tfa/metrics/utils.md) module: Utilities for metrics.
+
+## Classes
+
+[`class CohenKappa`](../tfa/metrics/CohenKappa.md): Computes Kappa score between two raters.
+
+[`class F1Score`](../tfa/metrics/F1Score.md): Computes F-1 Score.
+
+[`class FBetaScore`](../tfa/metrics/FBetaScore.md): Computes F-Beta score.
+
+[`class HammingLoss`](../tfa/metrics/HammingLoss.md): Computes hamming loss.
+
+[`class MatthewsCorrelationCoefficient`](../tfa/metrics/MatthewsCorrelationCoefficient.md): Computes the Matthews Correlation Coefficient.
+
+[`class MeanMetricWrapper`](../tfa/metrics/MeanMetricWrapper.md): Wraps a stateless metric function with the Mean metric.
+
+[`class MultiLabelConfusionMatrix`](../tfa/metrics/MultiLabelConfusionMatrix.md): Computes Multi-label confusion matrix.
+
+[`class RSquare`](../tfa/metrics/RSquare.md): Compute R^2 score.
+
+## Functions
+
+[`hamming_distance(...)`](../tfa/metrics/hamming_distance.md): Computes hamming distance.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/CohenKappa.md b/docs/api_docs/python/tfa/metrics/CohenKappa.md
new file mode 100644
index 0000000000..98f1613eff
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/CohenKappa.md
@@ -0,0 +1,926 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.CohenKappa
+
+
+
+
+
+
+
+
+## Class `CohenKappa`
+
+Computes Kappa score between two raters.
+
+
+
+**Aliases**: `tfa.metrics.cohens_kappa.CohenKappa`
+
+
+
+The score lies in the range [-1, 1]. A score of -1 represents
+complete disagreement between two raters whereas a score of 1
+represents complete agreement between the two raters.
+A score of 0 means agreement by chance.
+
+Note: As of now, this implementation considers all labels
+while calculating the Cohen's Kappa score.
+
+#### Usage:
+
+
+
+```python
+actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
+preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
+weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
+
+m = tfa.metrics.CohenKappa(num_classes=5)
+m.update_state(actuals, preds)
+print('Final result: ', m.result().numpy()) # Result: 0.61904764
+
+# To use this with weights, sample_weight argument can be used.
+m = tfa.metrics.CohenKappa(num_classes=5)
+m.update_state(actuals, preds, sample_weight=weights)
+print('Final result: ', m.result().numpy()) # Result: 0.37209308
+```
+
+Usage with tf.keras API:
+
+```python
+model = tf.keras.models.Model(inputs, outputs)
+model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs))
+model.compile('sgd', loss='mse')
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_classes,
+ name='cohen_kappa',
+ weightage=None,
+ dtype=None
+)
+```
+
+Creates a `CohenKappa` instance.
+
+
+#### Args:
+
+
+* `num_classes`: Number of unique classes in your dataset.
+* `name`: (Optional) String name of the metric instance.
+* `weightage`: (Optional) Weighting to be considered for calculating
+ kappa statistics. A valid value is one of
+ [None, 'linear', 'quadratic']. Defaults to `None`.
+* `dtype`: (Optional) Data type of the metric result.
+ Defaults to `None`.
+
+
+#### Raises:
+
+
+* `ValueError`: If the value passed for `weightage` is invalid
+ i.e. not any one of [None, 'linear', 'quadratic']
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates the confusion matrix condition statistics.
+
+
+#### Args:
+
+
+* `y_true`: Labels assigned by the first annotator with shape
+ `[num_samples,]`.
+* `y_pred`: Labels assigned by the second annotator with shape
+ `[num_samples,]`. The kappa statistic is symmetric,
+ so swapping `y_true` and `y_pred` doesn't change the value.
+* `sample_weight`: (Optional) For weighting labels in the confusion matrix.
+ Defaults to `None`. The dtype for weights should be the same
+ as the dtype for confusion matrix. For more details,
+ please check `tf.math.confusion_matrix`.
+
+
+#### Returns:
+
+Update op.
+
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/F1Score.md b/docs/api_docs/python/tfa/metrics/F1Score.md
new file mode 100644
index 0000000000..81a9c8be7d
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/F1Score.md
@@ -0,0 +1,918 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.F1Score
+
+
+
+
+
+
+
+
+## Class `F1Score`
+
+Computes F-1 Score.
+
+Inherits From: [`FBetaScore`](../../tfa/metrics/FBetaScore.md)
+
+**Aliases**: `tfa.metrics.f_scores.F1Score`
+
+
+
+It is the harmonic mean of precision and recall.
+Output range is [0, 1]. Works for both multi-class
+and multi-label classification.
+
+F-1 = 2 * (precision * recall) / (precision + recall)
+
+#### Args:
+
+
+* `num_classes`: Number of unique classes in the dataset.
+* `average`: Type of averaging to be performed on data.
+ Acceptable values are `None`, `micro`, `macro`
+ and `weighted`. Default value is None.
+* `threshold`: Elements of `y_pred` above threshold are
+ considered to be 1, and the rest 0. If threshold is
+ None, the argmax is converted to 1, and the rest 0.
+
+
+#### Returns:
+
+F-1 Score: float
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the `average` has values other than
+[None, micro, macro, weighted].
+
+`average` parameter behavior:
+ None: Scores for each class are returned
+
+    micro: True positives, false positives and
+    false negatives are computed globally.
+
+    macro: True positives, false positives and
+ false negatives are computed for each class
+ and their unweighted mean is returned.
+
+ weighted: Metrics are computed for each class
+ and returns the mean weighted by the
+ number of true instances in each class.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_classes,
+ average=None,
+ threshold=None,
+ name='f1_score',
+ dtype=tf.float32
+)
+```
+
+
+
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates statistics for the metric.
+
+Note: This function is executed as a graph function in graph mode.
+This means:
+ a) Operations on the same resource are executed in textual order.
+ This should make it easier to do things like add the updated
+ value of a variable to another, for example.
+ b) You don't need to worry about collecting the update ops to execute.
+ All update ops added to the graph by this function will be executed.
+ As a result, code should generally work the same way with graph or
+ eager execution.
+
+Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
+this function eagerly for debugging or profiling.
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/FBetaScore.md b/docs/api_docs/python/tfa/metrics/FBetaScore.md
new file mode 100644
index 0000000000..fe1bbed2d0
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/FBetaScore.md
@@ -0,0 +1,925 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.FBetaScore
+
+
+
+
+
+
+
+
+## Class `FBetaScore`
+
+Computes F-Beta score.
+
+
+
+**Aliases**: `tfa.metrics.f_scores.FBetaScore`
+
+
+
+It is the weighted harmonic mean of precision
+and recall. Output range is [0, 1]. Works for
+both multi-class and multi-label classification.
+
+F-Beta = (1 + beta^2) * (prec * recall) / ((beta^2 * prec) + recall)
+
+#### Args:
+
+
+* `num_classes`: Number of unique classes in the dataset.
+* `average`: Type of averaging to be performed on data.
+ Acceptable values are `None`, `micro`, `macro` and
+ `weighted`. Default value is None.
+* `beta`: Determines the weight of precision and recall
+ in harmonic mean. Determines the weight given to the
+ precision and recall. Default value is 1.
+* `threshold`: Elements of `y_pred` greater than threshold are
+ converted to be 1, and the rest 0. If threshold is
+ None, the argmax is converted to 1, and the rest 0.
+
+
+#### Returns:
+
+F-Beta Score: float
+
+
+
+#### Raises:
+
+
+* `ValueError`: If the `average` has values other than
+[None, micro, macro, weighted].
+
+* `ValueError`: If the `beta` value is less than or equal
+to 0.
+
+`average` parameter behavior:
+ None: Scores for each class are returned
+
+    micro: True positives, false positives and
+ false negatives are computed globally.
+
+    macro: True positives, false positives and
+ false negatives are computed for each class
+ and their unweighted mean is returned.
+
+ weighted: Metrics are computed for each class
+ and returns the mean weighted by the
+ number of true instances in each class.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_classes,
+ average=None,
+ beta=1.0,
+ threshold=None,
+ name='fbeta_score',
+ dtype=tf.float32
+)
+```
+
+
+
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric,
+ passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates statistics for the metric.
+
+Note: This function is executed as a graph function in graph mode.
+This means:
+ a) Operations on the same resource are executed in textual order.
+ This should make it easier to do things like add the updated
+ value of a variable to another, for example.
+ b) You don't need to worry about collecting the update ops to execute.
+ All update ops added to the graph by this function will be executed.
+ As a result, code should generally work the same way with graph or
+ eager execution.
+
+Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
+this function eagerly for debugging or profiling.
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/HammingLoss.md b/docs/api_docs/python/tfa/metrics/HammingLoss.md
new file mode 100644
index 0000000000..a1d5c290ca
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/HammingLoss.md
@@ -0,0 +1,871 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.HammingLoss
+
+
+
+
+
+
+
+
+## Class `HammingLoss`
+
+Computes hamming loss.
+
+Inherits From: [`MeanMetricWrapper`](../../tfa/metrics/MeanMetricWrapper.md)
+
+**Aliases**: `tfa.metrics.hamming.HammingLoss`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ mode,
+ name='hamming_loss',
+ threshold=None,
+ dtype=tf.float32
+)
+```
+
+Creates a `MeanMetricWrapper` instance.
+
+
+#### Args:
+
+
+* `fn`: The metric function to wrap, with signature
+ `fn(y_true, y_pred, **kwargs)`.
+* `name`: (Optional) string name of the metric instance.
+* `dtype`: (Optional) data type of the metric result.
+* `**kwargs`: The keyword arguments that are passed on to `fn`.
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`, `**kwargs`: A mini-batch of inputs to the Metric,
+ passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates metric statistics.
+
+`y_true` and `y_pred` should have the same shape.
+Args:
+ y_true: The ground truth values.
+ y_pred: The predicted values.
+ sample_weight: Optional weighting of each example. Defaults to 1.
+ Can be a `Tensor` whose rank is either 0, or the same rank as
+ `y_true`, and must be broadcastable to `y_true`.
+Returns:
+ Update op.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/MatthewsCorrelationCoefficient.md b/docs/api_docs/python/tfa/metrics/MatthewsCorrelationCoefficient.md
new file mode 100644
index 0000000000..42c91c3abb
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/MatthewsCorrelationCoefficient.md
@@ -0,0 +1,910 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.MatthewsCorrelationCoefficient
+
+
+
+
+
+
+
+
+## Class `MatthewsCorrelationCoefficient`
+
+Computes the Matthews Correlation Coefficient.
+
+
+
+**Aliases**: `tfa.metrics.matthews_correlation_coefficient.MatthewsCorrelationCoefficient`
+
+
+
+The statistic is also known as the phi coefficient.
+The Matthews correlation coefficient (MCC) is used in
+machine learning as a measure of the quality of binary
+and multiclass classifications. It takes into account
+true and false positives and negatives and is generally
+regarded as a balanced measure which can be used even
+if the classes are of very different sizes. The correlation
+coefficient value of MCC is between -1 and +1. A
+coefficient of +1 represents a perfect prediction,
+0 an average random prediction and -1 an inverse
+prediction. The statistic is also known as
+the phi coefficient.
+
+MCC = ((TP * TN) - (FP * FN)) /
+      ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))^(1/2)
+
+#### Usage:
+
+
+```python
+actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]],
+ dtype=tf.float32)
+preds = tf.constant([[1.0], [0.0], [1.0], [1.0]],
+ dtype=tf.float32)
+# Matthews correlation coefficient
+mcc = MatthewsCorrelationCoefficient(num_classes=1)
+mcc.update_state(actuals, preds)
+print('Matthews correlation coefficient is:',
+mcc.result().numpy())
+# Matthews correlation coefficient is: -0.33333334
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_classes=None,
+ name='MatthewsCorrelationCoefficient',
+ dtype=tf.float32
+)
+```
+
+Creates a Matthews Correlation Coefficient instance.
+
+
+#### Args:
+
+
+* `num_classes`: Number of unique classes in the dataset.
+* `name`: (Optional) String name of the metric instance.
+* `dtype`: (Optional) Data type of the metric result.
+Defaults to `tf.float32`.
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+* `**kwargs`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates statistics for the metric.
+
+Note: This function is executed as a graph function in graph mode.
+This means:
+ a) Operations on the same resource are executed in textual order.
+ This should make it easier to do things like add the updated
+ value of a variable to another, for example.
+ b) You don't need to worry about collecting the update ops to execute.
+ All update ops added to the graph by this function will be executed.
+ As a result, code should generally work the same way with graph or
+ eager execution.
+
+Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
+this function eagerly for debugging or profiling.
+
+#### Args:
+
+
+* `*args`: A mini-batch of inputs to the Metric.
+* `**kwargs`: A mini-batch of inputs to the Metric.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/MeanMetricWrapper.md b/docs/api_docs/python/tfa/metrics/MeanMetricWrapper.md
new file mode 100644
index 0000000000..98f5791939
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/MeanMetricWrapper.md
@@ -0,0 +1,871 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.MeanMetricWrapper
+
+
+
+
+
+
+
+
+## Class `MeanMetricWrapper`
+
+Wraps a stateless metric function with the Mean metric.
+
+
+
+**Aliases**: `tfa.metrics.utils.MeanMetricWrapper`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ fn,
+ name=None,
+ dtype=None,
+ **kwargs
+)
+```
+
+Creates a `MeanMetricWrapper` instance.
+
+
+#### Args:
+
+
+* `fn`: The metric function to wrap, with signature
+ `fn(y_true, y_pred, **kwargs)`.
+* `name`: (Optional) string name of the metric instance.
+* `dtype`: (Optional) data type of the metric result.
+* `**kwargs`: The keyword arguments that are passed on to `fn`.
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+* `**kwargs`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Accumulates metric statistics.
+
+`y_true` and `y_pred` should have the same shape.
+Args:
+ y_true: The ground truth values.
+ y_pred: The predicted values.
+ sample_weight: Optional weighting of each example. Defaults to 1.
+ Can be a `Tensor` whose rank is either 0, or the same rank as
+ `y_true`, and must be broadcastable to `y_true`.
+Returns:
+ Update op.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/MultiLabelConfusionMatrix.md b/docs/api_docs/python/tfa/metrics/MultiLabelConfusionMatrix.md
new file mode 100644
index 0000000000..955e7950d2
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/MultiLabelConfusionMatrix.md
@@ -0,0 +1,914 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.MultiLabelConfusionMatrix
+
+
+
+
+
+
+
+
+## Class `MultiLabelConfusionMatrix`
+
+Computes Multi-label confusion matrix.
+
+
+
+**Aliases**: `tfa.metrics.multilabel_confusion_matrix.MultiLabelConfusionMatrix`
+
+
+
+Class-wise confusion matrix is computed for the
+evaluation of classification.
+
+If multi-class input is provided, it will be treated
+as multilabel data.
+
+Consider classification problem with two classes
+(i.e num_classes=2).
+
+Resultant matrix `M` will be in the shape of (num_classes, 2, 2).
+
+Every class `i` has a dedicated 2*2 matrix that contains:
+
+- true negatives for class i in M(0,0)
+- false positives for class i in M(0,1)
+- false negatives for class i in M(1,0)
+- true positives for class i in M(1,1)
+
+```python
+# multilabel confusion matrix
+y_true = tf.constant([[1, 0, 1], [0, 1, 0]],
+ dtype=tf.int32)
+y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],
+ dtype=tf.int32)
+output = MultiLabelConfusionMatrix(num_classes=3)
+output.update_state(y_true, y_pred)
+print('Confusion matrix:', output.result().numpy())
+
+# Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]
+ [[0 1] [1 0]]]
+
+# if multiclass input is provided
+y_true = tf.constant([[1, 0, 0], [0, 1, 0]],
+ dtype=tf.int32)
+y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],
+ dtype=tf.int32)
+output = MultiLabelConfusionMatrix(num_classes=3)
+output.update_state(y_true, y_pred)
+print('Confusion matrix:', output.result().numpy())
+
+# Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ num_classes,
+ name='Multilabel_confusion_matrix',
+ dtype=tf.int32
+)
+```
+
+
+
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+* `**kwargs`: A mini-batch of inputs to the Metric,
+  passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred
+)
+```
+
+Accumulates statistics for the metric.
+
+Note: This function is executed as a graph function in graph mode.
+This means:
+ a) Operations on the same resource are executed in textual order.
+ This should make it easier to do things like add the updated
+ value of a variable to another, for example.
+ b) You don't need to worry about collecting the update ops to execute.
+ All update ops added to the graph by this function will be executed.
+ As a result, code should generally work the same way with graph or
+ eager execution.
+
+Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
+this function eagerly for debugging or profiling.
+
+#### Args:
+
+
+* `y_true`: The ground truth values.
+* `y_pred`: The predicted values.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/RSquare.md b/docs/api_docs/python/tfa/metrics/RSquare.md
new file mode 100644
index 0000000000..038828c7b3
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/RSquare.md
@@ -0,0 +1,886 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.metrics.RSquare
+
+
+
+
+
+
+
+
+## Class `RSquare`
+
+Compute R^2 score.
+
+
+
+**Aliases**: `tfa.metrics.r_square.RSquare`
+
+
+
+ This is also known as the coefficient of determination.
+ It indicates how close the data are to the fitted regression line.
+
+ - The highest score possible is 1.0, which indicates that the predictors
+ perfectly account for the variation in the target.
+ - A score of 0.0 indicates that the predictors do not
+ account for the variation in the target.
+ - The score can also be negative if the model fits the data worse than
+ a constant baseline.
+
+ Usage:
+ ```python
+ actuals = tf.constant([1, 4, 3], dtype=tf.float32)
+ preds = tf.constant([2, 4, 4], dtype=tf.float32)
+ result = tfa.metrics.RSquare()
+ result.update_state(actuals, preds)
+ print('R^2 score is: ', result.result().numpy()) # 0.57142866
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ name='r_square',
+ dtype=tf.float32
+)
+```
+
+
+
+
+__new__
+
+``` python
+__new__(
+ cls,
+ *args,
+ **kwargs
+)
+```
+
+Create and return a new object. See help(type) for accurate signature.
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ *args,
+ **kwargs
+)
+```
+
+Accumulates statistics and then computes metric result value.
+
+
+#### Args:
+
+
+* `*args`: Positional arguments forwarded to `update_state()`.
+* `**kwargs`: A mini-batch of inputs to the Metric,
+ passed on to `update_state()`.
+
+
+#### Returns:
+
+The metric value tensor.
+
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the serializable config of the metric.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+reset_states
+
+View source
+
+``` python
+reset_states()
+```
+
+Resets all of the metric state variables.
+
+This function is called between epochs/steps,
+when a metric is evaluated during training.
+
+result
+
+View source
+
+``` python
+result()
+```
+
+Computes and returns the metric value tensor.
+
+Result computation is an idempotent operation that simply calculates the
+metric value using the state variables.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+update_state
+
+View source
+
+``` python
+update_state(
+ y_true,
+ y_pred
+)
+```
+
+Accumulates statistics for the metric.
+
+Note: This function is executed as a graph function in graph mode.
+This means:
+ a) Operations on the same resource are executed in textual order.
+ This should make it easier to do things like add the updated
+ value of a variable to another, for example.
+ b) You don't need to worry about collecting the update ops to execute.
+ All update ops added to the graph by this function will be executed.
+ As a result, code should generally work the same way with graph or
+ eager execution.
+
+Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
+this function eagerly for debugging or profiling.
+
+#### Args:
+
+
+* `y_true`: The ground truth values.
+* `y_pred`: The predicted values.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/cohens_kappa.md b/docs/api_docs/python/tfa/metrics/cohens_kappa.md
new file mode 100644
index 0000000000..bfdbe29e9e
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/cohens_kappa.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.metrics.cohens_kappa
+
+
+
+
+
+
+Implements Cohen's Kappa.
+
+
+
+## Classes
+
+[`class CohenKappa`](../../tfa/metrics/CohenKappa.md): Computes Kappa score between two raters.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/f_scores.md b/docs/api_docs/python/tfa/metrics/f_scores.md
new file mode 100644
index 0000000000..378366d95a
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/f_scores.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.metrics.f_scores
+
+
+
+
+
+
+Implements F scores.
+
+
+
+## Classes
+
+[`class F1Score`](../../tfa/metrics/F1Score.md): Computes F-1 Score.
+
+[`class FBetaScore`](../../tfa/metrics/FBetaScore.md): Computes F-Beta score.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/hamming.md b/docs/api_docs/python/tfa/metrics/hamming.md
new file mode 100644
index 0000000000..d24c021fa3
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/hamming.md
@@ -0,0 +1,35 @@
+
+
+
+
+
+# Module: tfa.metrics.hamming
+
+
+
+
+
+
+Implements Hamming distance and loss.
+
+
+
+## Classes
+
+[`class HammingLoss`](../../tfa/metrics/HammingLoss.md): Computes hamming loss.
+
+## Functions
+
+[`hamming_distance(...)`](../../tfa/metrics/hamming_distance.md): Computes hamming distance.
+
+[`hamming_loss_fn(...)`](../../tfa/metrics/hamming/hamming_loss_fn.md): Computes hamming loss.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/hamming/hamming_loss_fn.md b/docs/api_docs/python/tfa/metrics/hamming/hamming_loss_fn.md
new file mode 100644
index 0000000000..9ec55e2ccd
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/hamming/hamming_loss_fn.md
@@ -0,0 +1,93 @@
+
+
+
+
+
+# tfa.metrics.hamming.hamming_loss_fn
+
+
+
+
+
+
+
+
+Computes hamming loss.
+
+``` python
+tfa.metrics.hamming.hamming_loss_fn(
+ y_true,
+ y_pred,
+ threshold,
+ mode
+)
+```
+
+
+
+
+
+Hamming loss is the fraction of wrong labels to the total number
+of labels.
+
+In multi-class classification, hamming loss is calculated as the
+hamming distance between `actual` and `predictions`.
+In multi-label classification, hamming loss penalizes only the
+individual labels.
+
+#### Args:
+
+
+
+* `y_true`: actual target value
+* `y_pred`: predicted target value
+* `threshold`: Elements of `y_pred` greater than threshold are
+ converted to be 1, and the rest 0. If threshold is
+ None, the argmax is converted to 1, and the rest 0.
+* `mode`: multi-class or multi-label
+
+
+#### Returns:
+
+
+hamming loss: float
+
+
+
+#### Usage:
+
+
+
+```python
+# multi-class hamming loss
+hl = HammingLoss(mode='multiclass', threshold=0.6)
+actuals = tf.constant([[1, 0, 0, 0],[0, 0, 1, 0],
+ [0, 0, 0, 1],[0, 1, 0, 0]],
+ dtype=tf.float32)
+predictions = tf.constant([[0.8, 0.1, 0.1, 0],
+ [0.2, 0, 0.8, 0],
+ [0.05, 0.05, 0.1, 0.8],
+ [1, 0, 0, 0]],
+ dtype=tf.float32)
+hl.update_state(actuals, predictions)
+print('Hamming loss: ', hl.result().numpy()) # 0.25
+
+# multi-label hamming loss
+hl = HammingLoss(mode='multilabel', threshold=0.8)
+actuals = tf.constant([[1, 0, 1, 0],[0, 1, 0, 1],
+ [0, 0, 0, 1]], dtype=tf.int32)
+predictions = tf.constant([[0.82, 0.5, 0.90, 0],
+ [0, 1, 0.4, 0.98],
+ [0.89, 0.79, 0, 0.3]],
+ dtype=tf.float32)
+hl.update_state(actuals, predictions)
+print('Hamming loss: ', hl.result().numpy()) # 0.16666667
+```
+
diff --git a/docs/api_docs/python/tfa/metrics/hamming_distance.md b/docs/api_docs/python/tfa/metrics/hamming_distance.md
new file mode 100644
index 0000000000..c5a39e808b
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/hamming_distance.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+# tfa.metrics.hamming_distance
+
+
+
+
+
+
+
+
+Computes hamming distance.
+
+**Aliases**: `tfa.metrics.hamming.hamming_distance`
+
+``` python
+tfa.metrics.hamming_distance(
+ actuals,
+ predictions
+)
+```
+
+
+
+
+
+Hamming distance is for comparing two binary strings.
+It is the number of bit positions in which two bits
+are different.
+
+#### Args:
+
+
+* `actuals`: actual target value
+* `predictions`: predicted value
+
+
+#### Returns:
+
+hamming distance: float
+
+
+
+#### Usage:
+
+
+
+```python
+actuals = tf.constant([1, 1, 0, 0, 1, 0, 1, 0, 0, 1],
+ dtype=tf.int32)
+predictions = tf.constant([1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
+ dtype=tf.int32)
+result = hamming_distance(actuals, predictions)
+print('Hamming distance: ', result.numpy())
+```
+
diff --git a/docs/api_docs/python/tfa/metrics/matthews_correlation_coefficient.md b/docs/api_docs/python/tfa/metrics/matthews_correlation_coefficient.md
new file mode 100644
index 0000000000..bce309fd02
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/matthews_correlation_coefficient.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.metrics.matthews_correlation_coefficient
+
+
+
+
+
+
+Matthews Correlation Coefficient Implementation.
+
+
+
+## Classes
+
+[`class MatthewsCorrelationCoefficient`](../../tfa/metrics/MatthewsCorrelationCoefficient.md): Computes the Matthews Correlation Coefficient.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/multilabel_confusion_matrix.md b/docs/api_docs/python/tfa/metrics/multilabel_confusion_matrix.md
new file mode 100644
index 0000000000..369ace8913
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/multilabel_confusion_matrix.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.metrics.multilabel_confusion_matrix
+
+
+
+
+
+
+Implements Multi-label confusion matrix scores.
+
+
+
+## Classes
+
+[`class MultiLabelConfusionMatrix`](../../tfa/metrics/MultiLabelConfusionMatrix.md): Computes Multi-label confusion matrix.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/r_square.md b/docs/api_docs/python/tfa/metrics/r_square.md
new file mode 100644
index 0000000000..02081c688a
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/r_square.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.metrics.r_square
+
+
+
+
+
+
+Implements R^2 scores.
+
+
+
+## Classes
+
+[`class RSquare`](../../tfa/metrics/RSquare.md): Compute R^2 score.
+
+
+
diff --git a/docs/api_docs/python/tfa/metrics/utils.md b/docs/api_docs/python/tfa/metrics/utils.md
new file mode 100644
index 0000000000..939ee56f56
--- /dev/null
+++ b/docs/api_docs/python/tfa/metrics/utils.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.metrics.utils
+
+
+
+
+
+
+Utilities for metrics.
+
+
+
+## Classes
+
+[`class MeanMetricWrapper`](../../tfa/metrics/MeanMetricWrapper.md): Wraps a stateless metric function with the Mean metric.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers.md b/docs/api_docs/python/tfa/optimizers.md
new file mode 100644
index 0000000000..1c31285d25
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers.md
@@ -0,0 +1,81 @@
+
+
+
+
+
+# Module: tfa.optimizers
+
+
+
+
+
+
+Additional optimizers that conform to Keras API.
+
+
+
+## Modules
+
+[`average_wrapper`](../tfa/optimizers/average_wrapper.md) module
+
+[`conditional_gradient`](../tfa/optimizers/conditional_gradient.md) module: Conditional Gradient optimizer.
+
+[`cyclical_learning_rate`](../tfa/optimizers/cyclical_learning_rate.md) module: Cyclical Learning Rate Schedule policies for TensorFlow.
+
+[`lamb`](../tfa/optimizers/lamb.md) module: Layer-wise Adaptive Moments (LAMB) optimizer.
+
+[`lazy_adam`](../tfa/optimizers/lazy_adam.md) module: Variant of the Adam optimizer that handles sparse updates more efficiently.
+
+[`lookahead`](../tfa/optimizers/lookahead.md) module
+
+[`moving_average`](../tfa/optimizers/moving_average.md) module
+
+[`rectified_adam`](../tfa/optimizers/rectified_adam.md) module: Rectified Adam (RAdam) optimizer.
+
+[`stochastic_weight_averaging`](../tfa/optimizers/stochastic_weight_averaging.md) module: An implementation of the Stochastic Weight Averaging optimizer.
+
+[`weight_decay_optimizers`](../tfa/optimizers/weight_decay_optimizers.md) module: Base class to make optimizers weight decay ready.
+
+## Classes
+
+[`class AdamW`](../tfa/optimizers/AdamW.md): Optimizer that implements the Adam algorithm with weight decay.
+
+[`class AveragedOptimizerWrapper`](../tfa/optimizers/AveragedOptimizerWrapper.md): Updated base class for optimizers.
+
+[`class ConditionalGradient`](../tfa/optimizers/ConditionalGradient.md): Optimizer that implements the Conditional Gradient optimization.
+
+[`class CyclicalLearningRate`](../tfa/optimizers/CyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class ExponentialCyclicalLearningRate`](../tfa/optimizers/ExponentialCyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class LAMB`](../tfa/optimizers/LAMB.md): Optimizer that implements the Layer-wise Adaptive Moments (LAMB).
+
+[`class LazyAdam`](../tfa/optimizers/LazyAdam.md): Variant of the Adam optimizer that handles sparse updates more efficiently.
+
+[`class Lookahead`](../tfa/optimizers/Lookahead.md): This class allows to extend optimizers with the lookahead mechanism.
+
+[`class MovingAverage`](../tfa/optimizers/MovingAverage.md): Optimizer that computes a moving average of the variables.
+
+[`class RectifiedAdam`](../tfa/optimizers/RectifiedAdam.md): Variant of the Adam optimizer whose adaptive learning rate is rectified
+
+[`class SGDW`](../tfa/optimizers/SGDW.md): Optimizer that implements the Momentum algorithm with weight_decay.
+
+[`class SWA`](../tfa/optimizers/SWA.md): This class extends optimizers with Stochastic Weight Averaging (SWA).
+
+[`class Triangular2CyclicalLearningRate`](../tfa/optimizers/Triangular2CyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class TriangularCyclicalLearningRate`](../tfa/optimizers/TriangularCyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+## Functions
+
+[`extend_with_decoupled_weight_decay(...)`](../tfa/optimizers/extend_with_decoupled_weight_decay.md): Factory function returning an optimizer class with decoupled weight decay.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/AdamW.md b/docs/api_docs/python/tfa/optimizers/AdamW.md
new file mode 100644
index 0000000000..2845806ea8
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/AdamW.md
@@ -0,0 +1,401 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.AdamW
+
+
+
+
+
+
+
+
+## Class `AdamW`
+
+Optimizer that implements the Adam algorithm with weight decay.
+
+Inherits From: [`DecoupledWeightDecayExtension`](../../tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md)
+
+**Aliases**: `tfa.optimizers.weight_decay_optimizers.AdamW`
+
+
+
+This is an implementation of the AdamW optimizer described in "Decoupled
+Weight Decay Regularization" by Loshchilov & Hutter
+(https://arxiv.org/abs/1711.05101)
+([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
+
+It computes the update step of `tf.keras.optimizers.Adam` and additionally
+decays the variable. Note that this is different from adding L2
+regularization on the variables to the loss: it regularizes variables with
+large gradients more than L2 regularization would, which was shown to yield
+better training loss and generalization error in the paper above.
+
+For further information see the documentation of the Adam Optimizer.
+
+This optimizer can also be instantiated as
+```python
+extend_with_decoupled_weight_decay(tf.keras.optimizers.Adam,
+ weight_decay=weight_decay)
+```
+
+Note: when applying a decay to the learning rate, be sure to manually apply
+the decay to the `weight_decay` as well. For example:
+
+```python
+step = tf.Variable(0, trainable=False)
+schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+ [10000, 15000], [1e-0, 1e-1, 1e-2])
+# lr and wd can be a function or a tensor
+lr = 1e-1 * schedule(step)
+wd = lambda: 1e-4 * schedule(step)
+
+# ...
+
+optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ weight_decay,
+ learning_rate=0.001,
+ beta_1=0.9,
+ beta_2=0.999,
+ epsilon=1e-07,
+ amsgrad=False,
+ name='AdamW',
+ **kwargs
+)
+```
+
+Construct a new AdamW optimizer.
+
+For further information see the documentation of the Adam Optimizer.
+
+#### Args:
+
+
+* `weight_decay`: A Tensor or a floating point value. The weight decay.
+* `learning_rate`: A Tensor or a floating point value. The learning
+ rate.
+* `beta_1`: A float value or a constant float tensor. The exponential
+ decay rate for the 1st moment estimates.
+* `beta_2`: A float value or a constant float tensor. The exponential
+ decay rate for the 2nd moment estimates.
+* `epsilon`: A small constant for numerical stability. This epsilon is
+ "epsilon hat" in the Kingma and Ba paper (in the formula just
+ before Section 2.1), not the epsilon in Algorithm 1 of the
+ paper.
+* `amsgrad`: boolean. Whether to apply AMSGrad variant of this
+ algorithm from the paper "On the Convergence of Adam and
+ beyond".
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "AdamW".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+ norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse decay
+ of learning rate. `lr` is included for backward compatibility,
+ recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the
+ name passed to the `Optimizer` constructor.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. If
+`global_step` was not None, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+View source
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before
+applying then call `tf.GradientTape` and `apply_gradients()` explicitly
+instead of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to
+ minimize.
+* `var_list`: list or tuple of `Variable` objects to update to
+ minimize `loss`, or a callable returning the list or tuple of
+ `Variable` objects. Use callable when the variable list would
+ otherwise be incomplete before `minimize` since the variables
+ are created at the first time `loss` is called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for
+ `loss`.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+* `name`: Optional name for the returned operation.
+
+#### Returns:
+
+An Operation that updates the variables in `var_list`. If
+`global_step` was not `None`, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/AveragedOptimizerWrapper.md b/docs/api_docs/python/tfa/optimizers/AveragedOptimizerWrapper.md
new file mode 100644
index 0000000000..92670020e4
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/AveragedOptimizerWrapper.md
@@ -0,0 +1,583 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.AveragedOptimizerWrapper
+
+
+
+
+
+
+
+
+## Class `AveragedOptimizerWrapper`
+
+Updated base class for optimizers.
+
+
+
+**Aliases**: `tfa.optimizers.average_wrapper.AveragedOptimizerWrapper`
+
+
+
+This class defines the API to add Ops to train a model. You never use this
+class directly, but instead instantiate one of its subclasses such as
+`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
+
+### Usage
+
+```python
+# Create an optimizer with the desired parameters.
+opt = tf.keras.optimizers.SGD(learning_rate=0.1)
+# `loss` is a callable that takes no argument and returns the value
+# to minimize.
+loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
+# In graph mode, returns op that minimizes the loss by updating the listed
+# variables.
+opt_op = opt.minimize(loss, var_list=[var1, var2])
+opt_op.run()
+# In eager mode, simply call minimize to update the list of variables.
+opt.minimize(loss, var_list=[var1, var2])
+```
+
+### Custom training loop with Keras models
+
+In Keras models, sometimes variables are created when the model is first
+called, instead of construction time. Examples include 1) sequential models
+without input shape pre-defined, or 2) subclassed models. Pass var_list as
+callable in these cases.
+
+#### Example:
+
+
+```python
+opt = tf.keras.optimizers.SGD(learning_rate=0.1)
+model = tf.keras.Sequential()
+model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
+model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
+loss_fn = lambda: tf.keras.losses.mse(model(input), output)
+var_list_fn = lambda: model.trainable_weights
+for input, output in data:
+ opt.minimize(loss_fn, var_list_fn)
+```
+
+### Processing gradients before applying them.
+
+Calling `minimize()` takes care of both computing the gradients and
+applying them to the variables. If you want to process the gradients
+before applying them you can instead use the optimizer in three steps:
+
+1. Compute the gradients with `tf.GradientTape`.
+2. Process the gradients as you wish.
+3. Apply the processed gradients with `apply_gradients()`.
+
+#### Example:
+
+
+
+```python
+# Create an optimizer.
+opt = tf.keras.optimizers.SGD(learning_rate=0.1)
+
+# Compute the gradients for a list of variables.
+with tf.GradientTape() as tape:
+  loss = <call_loss_function>
+vars = <list_of_variables>
+grads = tape.gradient(loss, vars)
+
+# Process the gradients, for example cap them, etc.
+# capped_grads = [MyCapper(g) for g in grads]
+processed_grads = [process_gradient(g) for g in grads]
+
+# Ask the optimizer to apply the processed gradients.
+opt.apply_gradients(zip(processed_grads, var_list))
+```
+
+### Use with `tf.distribute.Strategy`.
+
+This optimizer class is `tf.distribute.Strategy` aware, which means it
+automatically sums gradients across all replicas. To average gradients,
+you divide your loss by the global batch size, which is done
+automatically if you use `tf.keras` built-in training or evaluation loops.
+See the `reduction` argument of your loss which should be set to
+`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
+`tf.keras.losses.Reduction.SUM` for not.
+
+If you are not using these and you want to average gradients, you should use
+`tf.math.reduce_sum` to add up your per-example losses and then divide by the
+global batch size. Note that when using `tf.distribute.Strategy`, the first
+component of a tensor's shape is the *replica-local* batch size, which is off
+by a factor equal to the number of replicas being used to compute a single
+step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
+resulting in gradients that can be many times too big.
+
+### Variable Constraint
+
+All Keras optimizers respect variable constraints. If constraint function is
+passed to any variable, the constraint will be applied to the variable after
+the gradient has been applied to the variable.
+Important: If gradient is sparse tensor, variable constraint is not supported.
+
+### Thread Compatibility
+
+The entire optimizer is currently thread compatible, not thread-safe. The user
+needs to perform synchronization if necessary.
+
+### Slots
+
+Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
+additional variables associated with the variables to train. These are called
+Slots. Slots have names and you can ask the optimizer for the names of
+the slots that it uses. Once you have a slot name you can ask the optimizer
+for the variable it created to hold the slot value.
+
+This can be useful if you want to log debug a training algorithm, report stats
+about the slots, etc.
+
+### Hyper parameters
+
+These are arguments passed to the optimizer subclass constructor
+(the `__init__` method), and then passed to `self._set_hyper()`.
+They can be either regular Python values (like 1.0), tensors, or
+callables. If they are callable, the callable will be called during
+`apply_gradients()` to get the value for the hyper parameter.
+
+Hyper parameters can be overwritten through user code:
+
+#### Example:
+
+
+
+```python
+# Create an optimizer with the desired parameters.
+opt = tf.keras.optimizers.SGD(learning_rate=0.1)
+# `loss` is a callable that takes no argument and returns the value
+# to minimize.
+loss = lambda: 3 * var1 + 2 * var2
+# In eager mode, simply call minimize to update the list of variables.
+opt.minimize(loss, var_list=[var1, var2])
+# update learning rate
+opt.learning_rate = 0.05
+opt.minimize(loss, var_list=[var1, var2])
+```
+
+### Write a customized optimizer.
+If you intend to create your own optimization algorithm, simply inherit from
+this class and override the following methods:
+
+ - resource_apply_dense (update variable given gradient tensor is dense)
+ - resource_apply_sparse (update variable given gradient tensor is sparse)
+ - create_slots (if your optimizer algorithm requires additional variables)
+ - get_config (serialization of the optimizer, include all hyper parameters)
+
+__init__
+
+View source
+
+``` python
+__init__(
+ optimizer,
+ sequential_update=True,
+ name='AverageOptimizer',
+ **kwargs
+)
+```
+
+Create a new Optimizer.
+
+This must be called by the constructors of subclasses.
+Note that Optimizer instances should not bind to a single graph,
+and so shouldn't keep Tensors as member variables. Generally
+you should be able to use the _set_hyper()/state.get_hyper()
+facility instead.
+
+This class is stateful and thread-compatible.
+
+#### Args:
+
+
+* `name`: A non-empty string. The name to use for accumulators created
+ for the optimizer.
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
+ `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
+ gradients by value, `decay` is included for backward compatibility to
+ allow time inverse decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+#### Raises:
+
+
+* `ValueError`: If name is malformed.
+* `RuntimeError`: If _create_slots has been overridden instead of
+ _create_vars.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+learning_rate
+
+
+
+
+lr
+
+
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+assign_average_vars
+
+View source
+
+``` python
+assign_average_vars(var_list)
+```
+
+Assign variables in var_list with their respective averages.
+
+
+#### Args:
+
+
+* `var_list`: List of model variables to be assigned to their average.
+
+
+#### Returns:
+
+
+* `assign_op`: The op corresponding to the assignment operation of
+variables to their average.
+
+
+#### Example:
+
+
+```python
+model = tf.Sequential([...])
+opt = tfa.optimizers.SWA(
+ tf.keras.optimizers.SGD(lr=2.0), 100, 10)
+model.compile(opt, ...)
+model.fit(x, y, ...)
+
+# Update the weights to their mean before saving
+opt.assign_average_vars(model.variables)
+
+model.save('model.h5')
+```
+
+average_op
+
+View source
+
+``` python
+average_op(
+ var,
+ average_var
+)
+```
+
+
+
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/ConditionalGradient.md b/docs/api_docs/python/tfa/optimizers/ConditionalGradient.md
new file mode 100644
index 0000000000..f2641d0730
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/ConditionalGradient.md
@@ -0,0 +1,374 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.ConditionalGradient
+
+
+
+
+
+
+
+
+## Class `ConditionalGradient`
+
+Optimizer that implements the Conditional Gradient optimization.
+
+
+
+**Aliases**: `tfa.optimizers.conditional_gradient.ConditionalGradient`
+
+
+
+This optimizer helps handle constraints well.
+
+Currently only supports frobenius norm constraint.
+See https://arxiv.org/pdf/1803.06453.pdf
+
+```
+variable -= (1-learning_rate) * (variable + lambda_ * gradient
+ / (frobenius_norm(gradient) + epsilon))
+```
+
+Note that `lambda_` here refers to the constraint "lambda" in
+the paper. `epsilon` is constant with tiny value as compared to
+the value of frobenius norm of gradient. The purpose of `epsilon`
+here is to avoid the case that the value of frobenius norm of
+gradient is 0.
+
+In this implementation, `epsilon` defaults to $10^{-7}$.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ learning_rate,
+ lambda_,
+ epsilon=1e-07,
+ use_locking=False,
+ name='ConditionalGradient',
+ **kwargs
+)
+```
+
+Construct a new conditional gradient optimizer.
+
+
+#### Args:
+
+
+* `learning_rate`: A `Tensor` or a floating point value. or a schedule
+ that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
+ The learning rate.
+* `lambda_`: A `Tensor` or a floating point value. The constraint.
+* `epsilon`: A `Tensor` or a floating point value. A small constant
+ for numerical stability when handling the case of norm of
+ gradient to be zero.
+* `use_locking`: If `True`, use locks for update operations.
+* `name`: Optional name prefix for the operations created when
+ applying gradients. Defaults to 'ConditionalGradient'.
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
+ by norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/CyclicalLearningRate.md b/docs/api_docs/python/tfa/optimizers/CyclicalLearningRate.md
new file mode 100644
index 0000000000..909dd8c65c
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/CyclicalLearningRate.md
@@ -0,0 +1,149 @@
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.CyclicalLearningRate
+
+
+
+
+
+
+
+
+## Class `CyclicalLearningRate`
+
+A LearningRateSchedule that uses cyclical schedule.
+
+
+
+**Aliases**: `tfa.optimizers.cyclical_learning_rate.CyclicalLearningRate`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ initial_learning_rate,
+ maximal_learning_rate,
+ step_size,
+ scale_fn,
+ scale_mode='cycle',
+ name=None
+)
+```
+
+Applies cyclical schedule to the learning rate.
+
+See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186
+
+
+```python
+lr_schedule = tf.keras.optimizers.schedules.CyclicalLearningRate(
+ initial_learning_rate=1e-4,
+ maximal_learning_rate=1e-2,
+ step_size=2000,
+ scale_fn=lambda x: 1.,
+ scale_mode="cycle",
+ name="MyCyclicScheduler")
+
+model.compile(optimizer=tf.keras.optimizers.SGD(
+ learning_rate=lr_schedule),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+model.fit(data, labels, epochs=5)
+```
+
+You can pass this schedule directly into a
+`tf.keras.optimizers.Optimizer` as the learning rate.
+
+#### Args:
+
+
+* `initial_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The initial learning rate.
+* `maximal_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The maximum learning rate.
+* `step_size`: A scalar `float32` or `float64` `Tensor` or a
+ Python number. Step size.
+* `scale_fn`: A function. Scheduling function applied in cycle
+* `scale_mode`: ['cycle', 'iterations']. Mode to apply during cyclic
+ schedule
+* `name`: (Optional) Name for the operation.
+
+
+#### Returns:
+
+Updated learning rate value.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(step)
+```
+
+Call self as a function.
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `LearningRateSchedule` from its config.
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `LearningRateSchedule` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/ExponentialCyclicalLearningRate.md b/docs/api_docs/python/tfa/optimizers/ExponentialCyclicalLearningRate.md
new file mode 100644
index 0000000000..9b9fce0b1e
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/ExponentialCyclicalLearningRate.md
@@ -0,0 +1,153 @@
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.ExponentialCyclicalLearningRate
+
+
+
+
+
+
+
+
+## Class `ExponentialCyclicalLearningRate`
+
+A LearningRateSchedule that uses cyclical schedule.
+
+Inherits From: [`CyclicalLearningRate`](../../tfa/optimizers/CyclicalLearningRate.md)
+
+**Aliases**: `tfa.optimizers.cyclical_learning_rate.ExponentialCyclicalLearningRate`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ initial_learning_rate,
+ maximal_learning_rate,
+ step_size,
+ scale_mode='iterations',
+ gamma=1.0,
+ name='ExponentialCyclicalLearningRate'
+)
+```
+
+Applies exponential cyclical schedule to the learning rate.
+
+See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186
+
+
+```python
+from tensorflow.keras.optimizers import schedules
+
+lr_schedule = ExponentialCyclicalLearningRate(
+ initial_learning_rate=1e-4,
+ maximal_learning_rate=1e-2,
+ step_size=2000,
+ scale_mode="cycle",
+ gamma=0.96,
+ name="MyCyclicScheduler")
+
+model.compile(optimizer=tf.keras.optimizers.SGD(
+ learning_rate=lr_schedule),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+model.fit(data, labels, epochs=5)
+```
+
+You can pass this schedule directly into a
+`tf.keras.optimizers.Optimizer` as the learning rate.
+
+#### Args:
+
+
+* `initial_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The initial learning rate.
+* `maximal_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The maximum learning rate.
+* `step_size`: A scalar `float32` or `float64` `Tensor` or a
+ Python number. Step size.
+* `scale_fn`: A function. Scheduling function applied in cycle
+* `scale_mode`: ['cycle', 'iterations']. Mode to apply during cyclic
+ schedule
+* `gamma`: A scalar `float32` or `float64` `Tensor` or a
+ Python number. Gamma value.
+* `name`: (Optional) Name for the operation.
+
+
+#### Returns:
+
+Updated learning rate value.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(step)
+```
+
+Call self as a function.
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `LearningRateSchedule` from its config.
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `LearningRateSchedule` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/LAMB.md b/docs/api_docs/python/tfa/optimizers/LAMB.md
new file mode 100644
index 0000000000..f40e460094
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/LAMB.md
@@ -0,0 +1,369 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.LAMB
+
+
+
+
+
+
+
+
+## Class `LAMB`
+
+Optimizer that implements the Layer-wise Adaptive Moments (LAMB).
+
+
+
+**Aliases**: `tfa.optimizers.lamb.LAMB`
+
+
+
+See paper [Large Batch Optimization for Deep Learning: Training BERT
+in 76 minutes](https://arxiv.org/abs/1904.00962).
+
+__init__
+
+View source
+
+``` python
+__init__(
+ learning_rate=0.001,
+ beta_1=0.9,
+ beta_2=0.999,
+ epsilon=1e-06,
+ weight_decay_rate=0.0,
+ exclude_from_weight_decay=None,
+ exclude_from_layer_adaptation=None,
+ name='LAMB',
+ **kwargs
+)
+```
+
+Construct a new LAMB optimizer.
+
+
+#### Args:
+
+
+* `learning_rate`: A `Tensor` or a floating point value, or a schedule
+ that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
+ The learning rate.
+* `beta_1`: A `float` value or a constant `float` tensor.
+ The exponential decay rate for the 1st moment estimates.
+* `beta_2`: A `float` value or a constant `float` tensor.
+ The exponential decay rate for the 2nd moment estimates.
+* `epsilon`: A small constant for numerical stability.
+* `weight_decay_rate`: weight decay rate.
+* `exclude_from_weight_decay`: comma separated name patterns of
+ variables excluded from weight decay. Variables whose name
+ contain a substring matching the pattern will be excluded.
+* `exclude_from_layer_adaptation`: comma separated name patterns of
+ variables excluded from layer adaptation. Variables whose name
+ contain a substring matching the pattern will be excluded.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "LAMB".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+ norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/LazyAdam.md b/docs/api_docs/python/tfa/optimizers/LazyAdam.md
new file mode 100644
index 0000000000..c072bdbc95
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/LazyAdam.md
@@ -0,0 +1,377 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.LazyAdam
+
+
+
+
+
+
+
+
+## Class `LazyAdam`
+
+Variant of the Adam optimizer that handles sparse updates more
+efficiently.
+
+
+**Aliases**: `tfa.optimizers.lazy_adam.LazyAdam`
+
+
+
+
+The original Adam algorithm maintains two moving-average accumulators for
+each trainable variable; the accumulators are updated at every step.
+This class provides lazier handling of gradient updates for sparse
+variables. It only updates moving-average accumulators for sparse variable
+indices that appear in the current batch, rather than updating the
+accumulators for all indices. Compared with the original Adam optimizer,
+it can provide large improvements in model training throughput for some
+applications. However, it provides slightly different semantics than the
+original Adam algorithm, and may lead to different empirical results.
+
+Note, amsgrad is currently not supported and the argument can only be
+False.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ learning_rate=0.001,
+ beta_1=0.9,
+ beta_2=0.999,
+ epsilon=1e-07,
+ amsgrad=False,
+ name='LazyAdam',
+ **kwargs
+)
+```
+
+Constructs a new LazyAdam optimizer.
+
+
+#### Args:
+
+
+* `learning_rate`: A `Tensor` or a floating point value, or a schedule
+ that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
+ The learning rate.
+* `beta_1`: A `float` value or a constant `float` tensor.
+ The exponential decay rate for the 1st moment estimates.
+* `beta_2`: A `float` value or a constant `float` tensor.
+ The exponential decay rate for the 2nd moment estimates.
+* `epsilon`: A small constant for numerical stability.
+ This epsilon is "epsilon hat" in
+ [Adam: A Method for Stochastic Optimization. Kingma et al., 2014]
+ (http://arxiv.org/abs/1412.6980) (in the formula just
+ before Section 2.1), not the epsilon in Algorithm 1 of the paper.
+* `amsgrad`: `boolean`. Whether to apply AMSGrad variant of this
+ algorithm from the paper "On the Convergence of Adam and beyond".
+ Note that this argument is currently not supported and the
+ argument can only be `False`.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "LazyAdam".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
+ `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
+ is clip gradients by value, `decay` is included for backward
+ compatibility to allow time inverse decay of learning rate. `lr`
+ is included for backward compatibility, recommended to use
+ `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/Lookahead.md b/docs/api_docs/python/tfa/optimizers/Lookahead.md
new file mode 100644
index 0000000000..09b72bac7c
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/Lookahead.md
@@ -0,0 +1,388 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.Lookahead
+
+
+
+
+
+
+
+
+## Class `Lookahead`
+
+This class allows you to extend optimizers with the lookahead mechanism.
+
+
+
+**Aliases**: `tfa.optimizers.lookahead.Lookahead`
+
+
+
+The mechanism is proposed by Michael R. Zhang et al. in the paper
+[Lookahead Optimizer: k steps forward, 1 step back]
+(https://arxiv.org/abs/1907.08610v1). The optimizer iteratively updates two
+sets of weights: the search directions for weights are chosen by the inner
+optimizer, while the "slow weights" are updated each `k` steps based on the
+directions of the "fast weights" and the two sets of weights are
+synchronized. This method improves the learning stability and lowers the
+variance of its inner optimizer.
+
+#### Example of usage:
+
+
+
+```python
+opt = tf.keras.optimizers.SGD(learning_rate)
+opt = tfa.optimizers.Lookahead(opt)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ optimizer,
+ sync_period=6,
+ slow_step_size=0.5,
+ name='Lookahead',
+ **kwargs
+)
+```
+
+Wrap optimizer with the lookahead mechanism.
+
+
+#### Args:
+
+
+* `optimizer`: The original optimizer that will be used to compute
+ and apply the gradients.
+* `sync_period`: An integer. The synchronization period of lookahead.
+ Enable lookahead mechanism by setting it with a positive value.
+* `slow_step_size`: A floating point value.
+ The ratio for updating the slow weights.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "Lookahead".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
+ by norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+learning_rate
+
+
+
+
+lr
+
+
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/MovingAverage.md b/docs/api_docs/python/tfa/optimizers/MovingAverage.md
new file mode 100644
index 0000000000..2bdb0247e9
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/MovingAverage.md
@@ -0,0 +1,446 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.MovingAverage
+
+
+
+
+
+
+
+
+## Class `MovingAverage`
+
+Optimizer that computes a moving average of the variables.
+
+Inherits From: [`AveragedOptimizerWrapper`](../../tfa/optimizers/AveragedOptimizerWrapper.md)
+
+**Aliases**: `tfa.optimizers.moving_average.MovingAverage`
+
+
+
+Empirically it has been found that using the moving average of the trained
+parameters of a deep network is better than using its trained parameters
+directly. This optimizer allows you to compute this moving average and swap
+the variables at save time so that any code outside of the training loop
+will use by default the average values instead of the original ones.
+
+#### Example of usage:
+
+
+
+```python
+opt = tf.keras.optimizers.SGD(learning_rate)
+opt = tfa.optimizers.MovingAverage(opt)
+
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ optimizer,
+ sequential_update=True,
+ average_decay=0.99,
+ num_updates=None,
+ name='MovingAverage',
+ **kwargs
+)
+```
+
+Construct a new MovingAverage optimizer.
+
+
+#### Args:
+
+
+* `optimizer`: str or `tf.keras.optimizers.Optimizer` that will be
+ used to compute and apply gradients.
+* `sequential_update`: Bool. If False, will compute the moving average
+ at the same time as the model is updated, potentially doing
+ benign data races. If True, will update the moving average
+ after gradient updates.
+* `average_decay`: float. Decay to use to maintain the moving averages
+ of trained variables.
+* `num_updates`: Optional count of the number of updates applied to
+ variables.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "MovingAverage".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+ norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+learning_rate
+
+
+
+
+lr
+
+
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+assign_average_vars
+
+View source
+
+``` python
+assign_average_vars(var_list)
+```
+
+Assign variables in var_list with their respective averages.
+
+
+#### Args:
+
+
+* `var_list`: List of model variables to be assigned to their average.
+
+
+#### Returns:
+
+
+* `assign_op`: The op corresponding to the assignment operation of
+variables to their average.
+
+
+#### Example:
+
+
+```python
+model = tf.Sequential([...])
+opt = tfa.optimizers.SWA(
+ tf.keras.optimizers.SGD(lr=2.0), 100, 10)
+model.compile(opt, ...)
+model.fit(x, y, ...)
+
+# Update the weights to their mean before saving
+opt.assign_average_vars(model.variables)
+
+model.save('model.h5')
+```
+
+average_op
+
+View source
+
+``` python
+average_op(
+ var,
+ average_var
+)
+```
+
+
+
+
+from_config
+
+View source
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/RectifiedAdam.md b/docs/api_docs/python/tfa/optimizers/RectifiedAdam.md
new file mode 100644
index 0000000000..a3d5cb449e
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/RectifiedAdam.md
@@ -0,0 +1,420 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.RectifiedAdam
+
+
+
+
+
+
+
+
+## Class `RectifiedAdam`
+
+Variant of the Adam optimizer whose adaptive learning rate is rectified
+so as to have a consistent variance.
+
+
+**Aliases**: `tfa.optimizers.rectified_adam.RectifiedAdam`
+
+
+
+
+It implements the Rectified Adam (a.k.a. RAdam) proposed by
+Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate
+And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).
+
+#### Example of usage:
+
+
+
+```python
+opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
+```
+
+Note: `amsgrad` is not described in the original paper. Use it with
+ caution.
+
+RAdam is not a replacement for the heuristic warmup; the settings should be
+kept if warmup has already been employed and tuned in the baseline method.
+You can enable warmup by setting `total_steps` and `warmup_proportion`:
+
+```python
+opt = tfa.optimizers.RectifiedAdam(
+ lr=1e-3,
+ total_steps=10000,
+ warmup_proportion=0.1,
+ min_lr=1e-5,
+)
+```
+
+In the above example, the learning rate will increase linearly
+from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
+in 9000 steps.
+
+Lookahead, proposed by Michael R. Zhang et al. in the paper
+[Lookahead Optimizer: k steps forward, 1 step back]
+(https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
+which is announced by Less Wright and the new combined optimizer can also
+be called "Ranger". The mechanism can be enabled by using the lookahead
+wrapper. For example:
+
+```python
+radam = tfa.optimizers.RectifiedAdam()
+ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ learning_rate=0.001,
+ beta_1=0.9,
+ beta_2=0.999,
+ epsilon=1e-07,
+ weight_decay=0.0,
+ amsgrad=False,
+ sma_threshold=5.0,
+ total_steps=0,
+ warmup_proportion=0.1,
+ min_lr=0.0,
+ name='RectifiedAdam',
+ **kwargs
+)
+```
+
+Construct a new RAdam optimizer.
+
+
+#### Args:
+
+
+* `learning_rate`: A `Tensor` or a floating point value, or a schedule
+ that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
+ The learning rate.
+* `beta_1`: A float value or a constant float tensor.
+ The exponential decay rate for the 1st moment estimates.
+* `beta_2`: A float value or a constant float tensor.
+ The exponential decay rate for the 2nd moment estimates.
+* `epsilon`: A small constant for numerical stability.
+* `weight_decay`: A floating point value. Weight decay for each param.
+* `amsgrad`: boolean. Whether to apply AMSGrad variant of this
+ algorithm from the paper "On the Convergence of Adam and
+ beyond".
+* `sma_threshold`: A float value.
+  The threshold for the simple moving average.
+* `total_steps`: An integer. Total number of training steps.
+ Enable warmup by setting a positive value.
+* `warmup_proportion`: A floating point value.
+ The proportion of increasing steps.
+* `min_lr`: A floating point value. Minimum learning rate after warmup.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to "RectifiedAdam".
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
+ by norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+View source
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/SGDW.md b/docs/api_docs/python/tfa/optimizers/SGDW.md
new file mode 100644
index 0000000000..9dd7d95ce7
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/SGDW.md
@@ -0,0 +1,389 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.SGDW
+
+
+
+
+
+
+
+
+## Class `SGDW`
+
+Optimizer that implements the Momentum algorithm with weight_decay.
+
+Inherits From: [`DecoupledWeightDecayExtension`](../../tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md)
+
+**Aliases**: `tfa.optimizers.weight_decay_optimizers.SGDW`
+
+
+
+This is an implementation of the SGDW optimizer described in "Decoupled
+Weight Decay Regularization" by Loshchilov & Hutter
+(https://arxiv.org/abs/1711.05101)
+([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
+It computes the update step of `tf.keras.optimizers.SGD` and additionally
+decays the variable. Note that this is different from adding
+L2 regularization on the variables to the loss. Decoupling the weight decay
+from other hyperparameters (in particular the learning rate) simplifies
+hyperparameter search.
+
+For further information see the documentation of the SGD Optimizer.
+
+This optimizer can also be instantiated as
+```python
+extend_with_decoupled_weight_decay(tf.keras.optimizers.SGD,
+ weight_decay=weight_decay)
+```
+
+Note: when applying a decay to the learning rate, be sure to manually apply
+the decay to the `weight_decay` as well. For example:
+
+```python
+step = tf.Variable(0, trainable=False)
+schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+ [10000, 15000], [1e-0, 1e-1, 1e-2])
+# lr and wd can be a function or a tensor
+lr = 1e-1 * schedule(step)
+wd = lambda: 1e-4 * schedule(step)
+
+# ...
+
+optimizer = tfa.optimizers.SGDW(
+ learning_rate=lr, weight_decay=wd, momentum=0.9)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ weight_decay,
+ learning_rate=0.001,
+ momentum=0.0,
+ nesterov=False,
+ name='SGDW',
+ **kwargs
+)
+```
+
+Construct a new SGDW optimizer.
+
+For further information see the documentation of the SGD Optimizer.
+
+#### Args:
+
+
+* `learning_rate`: float hyperparameter >= 0. Learning rate.
+* `momentum`: float hyperparameter >= 0 that accelerates SGD in the
+ relevant direction and dampens oscillations.
+* `nesterov`: boolean. Whether to apply Nesterov momentum.
+* `name`: Optional name prefix for the operations created when applying
+ gradients. Defaults to 'SGD'.
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+ norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse decay
+ of learning rate. `lr` is included for backward compatibility,
+ recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the
+ name passed to the `Optimizer` constructor.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. If
+`global_step` was not None, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+View source
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before
+applying then call `tf.GradientTape` and `apply_gradients()` explicitly
+instead of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to
+ minimize.
+* `var_list`: list or tuple of `Variable` objects to update to
+ minimize `loss`, or a callable returning the list or tuple of
+ `Variable` objects. Use callable when the variable list would
+ otherwise be incomplete before `minimize` since the variables
+ are created at the first time `loss` is called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for
+ `loss`.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+* `name`: Optional name for the returned operation.
+
+#### Returns:
+
+An Operation that updates the variables in `var_list`. If
+`global_step` was not `None`, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/SWA.md b/docs/api_docs/python/tfa/optimizers/SWA.md
new file mode 100644
index 0000000000..5c15287767
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/SWA.md
@@ -0,0 +1,472 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.SWA
+
+
+
+
+
+
+
+
+## Class `SWA`
+
+This class extends optimizers with Stochastic Weight Averaging (SWA).
+
+Inherits From: [`AveragedOptimizerWrapper`](../../tfa/optimizers/AveragedOptimizerWrapper.md)
+
+**Aliases**: `tfa.optimizers.stochastic_weight_averaging.SWA`
+
+
+
+The Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov
+et al. in the paper [Averaging Weights Leads to Wider Optima and
+Better Generalization](https://arxiv.org/abs/1803.05407). The optimizer
+implements averaging of multiple points along the trajectory of SGD. The
+optimizer expects an inner optimizer which will be used to apply the
+gradients to the variables and itself computes a running average of the
+variables every `k` steps (which generally corresponds to the end
+of a cycle when a cyclic learning rate is employed).
+
+We also allow the specification of the number of steps averaging
+should first happen after. Let's say, we want averaging to happen every `k`
+steps after the first `m` steps. After step `m` we'd take a snapshot of the
+variables and then average the weights appropriately at step `m + k`,
+`m + 2k` and so on. The assign_average_vars function can be called at the
+end of training to obtain the averaged_weights from the optimizer.
+
+Note: If your model has batch-normalization layers you would need to run
+the final weights through the data to compute the running mean and
+variance corresponding to the activations for each layer of the network.
+From the paper: If the DNN uses batch normalization we run one
+additional pass over the data, to compute the running mean and standard
+deviation of the activations for each layer of the network with SWA
+weights after the training is finished, since these statistics are not
+collected during training. For most deep learning libraries, such as
+PyTorch or Tensorflow, one can typically collect these statistics by
+making a forward pass over the data in training mode
+([Averaging Weights Leads to Wider Optima and Better
+Generalization](https://arxiv.org/abs/1803.05407))
+
+#### Example of usage:
+
+
+
+```python
+opt = tf.keras.optimizers.SGD(learning_rate)
+opt = tfa.optimizers.SWA(opt, start_averaging=m, average_period=k)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ optimizer,
+ start_averaging=0,
+ average_period=10,
+ name='SWA',
+ sequential_update=True,
+ **kwargs
+)
+```
+
+Wrap optimizer with the Stochastic Weight Averaging mechanism.
+
+
+#### Args:
+
+
+* `optimizer`: The original optimizer that will be used to compute and
+ apply the gradients.
+* `start_averaging`: An integer. Threshold to start averaging using
+ SWA. Averaging only occurs at `start_averaging` iters, must
+ be >= 0. If start_averaging = m, the first snapshot will be
+ taken after the mth application of gradients (where the first
+ iteration is iteration 0).
+* `average_period`: An integer. The synchronization period of SWA. The
+ averaging occurs every average_period steps. Averaging period
+ needs to be >= 1.
+* `name`: Optional name for the operations created when applying
+ gradients. Defaults to 'SWA'.
+* `sequential_update`: Bool. If False, will compute the moving average
+ at the same time as the model is updated, potentially doing
+ benign data races. If True, will update the moving average
+ after gradient updates.
+* `**kwargs`: keyword arguments. Allowed to be {`clipnorm`,
+ `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+ norm; `clipvalue` is clip gradients by value, `decay` is
+ included for backward compatibility to allow time inverse
+ decay of learning rate. `lr` is included for backward
+ compatibility, recommended to use `learning_rate` instead.
+
+
+
+## Properties
+
+iterations
+
+Variable. The number of training steps this Optimizer has run.
+
+
+learning_rate
+
+
+
+
+lr
+
+
+
+
+weights
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+## Methods
+
+add_slot
+
+``` python
+add_slot(
+ var,
+ slot_name,
+ initializer='zeros'
+)
+```
+
+Add a new slot variable for `var`.
+
+
+add_weight
+
+``` python
+add_weight(
+ name,
+ shape,
+ dtype=None,
+ initializer='zeros',
+ trainable=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE
+)
+```
+
+
+
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the name
+ passed to the `Optimizer` constructor.
+
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+assign_average_vars
+
+View source
+
+``` python
+assign_average_vars(var_list)
+```
+
+Assign variables in var_list with their respective averages.
+
+
+#### Args:
+
+
+* `var_list`: List of model variables to be assigned to their average.
+
+
+#### Returns:
+
+
+* `assign_op`: The op corresponding to the assignment operation of
+variables to their average.
+
+
+#### Example:
+
+
+```python
+model = tf.keras.Sequential([...])
+opt = tfa.optimizers.SWA(
+ tf.keras.optimizers.SGD(lr=2.0), 100, 10)
+model.compile(opt, ...)
+model.fit(x, y, ...)
+
+# Update the weights to their mean before saving
+opt.assign_average_vars(model.variables)
+
+model.save('model.h5')
+```
+
+average_op
+
+View source
+
+``` python
+average_op(
+ var,
+ average_var
+)
+```
+
+
+
+
+from_config
+
+View source
+
+``` python
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates an optimizer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same optimizer from the config
+dictionary.
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the output of get_config.
+* `custom_objects`: A Python dictionary mapping names to additional Python
+ objects used to create this optimizer, such as a function used for a
+ hyperparameter.
+
+
+#### Returns:
+
+An optimizer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the optimizer.
+
+An optimizer config is a Python dictionary (serializable)
+containing the configuration of an optimizer.
+The same optimizer can be reinstantiated later
+(without any saved state) from this configuration.
+
+#### Returns:
+
+Python dictionary.
+
+
+get_gradients
+
+``` python
+get_gradients(
+ loss,
+ params
+)
+```
+
+Returns gradients of `loss` with respect to `params`.
+
+
+#### Arguments:
+
+
+* `loss`: Loss tensor.
+* `params`: List of variables.
+
+
+#### Returns:
+
+List of gradient tensors.
+
+
+
+#### Raises:
+
+
+* `ValueError`: In case any gradient cannot be computed (e.g. if gradient
+ function not implemented).
+
+get_slot
+
+``` python
+get_slot(
+ var,
+ slot_name
+)
+```
+
+
+
+
+get_slot_names
+
+``` python
+get_slot_names()
+```
+
+A list of names for this optimizer's slots.
+
+
+get_updates
+
+``` python
+get_updates(
+ loss,
+ params
+)
+```
+
+
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+
+
+
+minimize
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before applying
+then call `tf.GradientTape` and `apply_gradients()` explicitly instead
+of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to minimize.
+* `var_list`: list or tuple of `Variable` objects to update to minimize
+ `loss`, or a callable returning the list or tuple of `Variable` objects.
+ Use callable when the variable list would otherwise be incomplete before
+ `minimize` since the variables are created at the first time `loss` is
+ called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for `loss`.
+* `name`: Optional name for the returned operation.
+
+
+#### Returns:
+
+An `Operation` that updates the variables in `var_list`. The `iterations`
+will be automatically increased by 1.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+
+
+
+variables
+
+``` python
+variables()
+```
+
+Returns variables of this Optimizer based on the order created.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/Triangular2CyclicalLearningRate.md b/docs/api_docs/python/tfa/optimizers/Triangular2CyclicalLearningRate.md
new file mode 100644
index 0000000000..1bb4b62c31
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/Triangular2CyclicalLearningRate.md
@@ -0,0 +1,149 @@
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.Triangular2CyclicalLearningRate
+
+
+
+
+
+
+
+
+## Class `Triangular2CyclicalLearningRate`
+
+A LearningRateSchedule that uses cyclical schedule.
+
+Inherits From: [`CyclicalLearningRate`](../../tfa/optimizers/CyclicalLearningRate.md)
+
+**Aliases**: `tfa.optimizers.cyclical_learning_rate.Triangular2CyclicalLearningRate`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ initial_learning_rate,
+ maximal_learning_rate,
+ step_size,
+ scale_mode='cycle',
+ name='Triangular2CyclicalLearningRate'
+)
+```
+
+Applies triangular2 cyclical schedule to the learning rate.
+
+See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186
+
+
+```python
+from tensorflow.keras.optimizers import schedules
+
+lr_schedule = schedules.Triangular2CyclicalLearningRate(
+ initial_learning_rate=1e-4,
+ maximal_learning_rate=1e-2,
+ step_size=2000,
+ scale_mode="cycle",
+ name="MyCyclicScheduler")
+
+model.compile(optimizer=tf.keras.optimizers.SGD(
+ learning_rate=lr_schedule),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+model.fit(data, labels, epochs=5)
+```
+
+You can pass this schedule directly into a
+`tf.keras.optimizers.Optimizer` as the learning rate.
+
+#### Args:
+
+
+* `initial_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The initial learning rate.
+* `maximal_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The maximum learning rate.
+* `step_size`: A scalar `float32` or `float64` `Tensor` or a
+ Python number. Step size.
+* `scale_fn`: A function. Scheduling function applied in cycle
+* `scale_mode`: ['cycle', 'iterations']. Mode to apply during cyclic
+ schedule
+* `name`: (Optional) Name for the operation.
+
+
+#### Returns:
+
+Updated learning rate value.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(step)
+```
+
+Call self as a function.
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `LearningRateSchedule` from its config.
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `LearningRateSchedule` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/TriangularCyclicalLearningRate.md b/docs/api_docs/python/tfa/optimizers/TriangularCyclicalLearningRate.md
new file mode 100644
index 0000000000..e4e69e8cc1
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/TriangularCyclicalLearningRate.md
@@ -0,0 +1,149 @@
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.TriangularCyclicalLearningRate
+
+
+
+
+
+
+
+
+## Class `TriangularCyclicalLearningRate`
+
+A LearningRateSchedule that uses cyclical schedule.
+
+Inherits From: [`CyclicalLearningRate`](../../tfa/optimizers/CyclicalLearningRate.md)
+
+**Aliases**: `tfa.optimizers.cyclical_learning_rate.TriangularCyclicalLearningRate`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ initial_learning_rate,
+ maximal_learning_rate,
+ step_size,
+ scale_mode='cycle',
+ name='TriangularCyclicalLearningRate'
+)
+```
+
+Applies triangular cyclical schedule to the learning rate.
+
+See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186
+
+
+```python
+from tensorflow.keras.optimizers import schedules
+
+lr_schedule = schedules.TriangularCyclicalLearningRate(
+ initial_learning_rate=1e-4,
+ maximal_learning_rate=1e-2,
+ step_size=2000,
+ scale_mode="cycle",
+ name="MyCyclicScheduler")
+
+model.compile(optimizer=tf.keras.optimizers.SGD(
+ learning_rate=lr_schedule),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+model.fit(data, labels, epochs=5)
+```
+
+You can pass this schedule directly into a
+`tf.keras.optimizers.Optimizer` as the learning rate.
+
+#### Args:
+
+
+* `initial_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The initial learning rate.
+* `maximal_learning_rate`: A scalar `float32` or `float64` `Tensor` or
+ a Python number. The maximum learning rate.
+* `step_size`: A scalar `float32` or `float64` `Tensor` or a
+ Python number. Step size.
+* `scale_fn`: A function. Scheduling function applied in cycle
+* `scale_mode`: ['cycle', 'iterations']. Mode to apply during cyclic
+ schedule
+* `name`: (Optional) Name for the operation.
+
+
+#### Returns:
+
+Updated learning rate value.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(step)
+```
+
+Call self as a function.
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `LearningRateSchedule` from its config.
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `LearningRateSchedule` instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/average_wrapper.md b/docs/api_docs/python/tfa/optimizers/average_wrapper.md
new file mode 100644
index 0000000000..f8e157dbd4
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/average_wrapper.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.optimizers.average_wrapper
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class AveragedOptimizerWrapper`](../../tfa/optimizers/AveragedOptimizerWrapper.md): Updated base class for optimizers.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/conditional_gradient.md b/docs/api_docs/python/tfa/optimizers/conditional_gradient.md
new file mode 100644
index 0000000000..d4d36b8f68
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/conditional_gradient.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.optimizers.conditional_gradient
+
+
+
+
+
+
+Conditional Gradient optimizer.
+
+
+
+## Classes
+
+[`class ConditionalGradient`](../../tfa/optimizers/ConditionalGradient.md): Optimizer that implements the Conditional Gradient optimization.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/cyclical_learning_rate.md b/docs/api_docs/python/tfa/optimizers/cyclical_learning_rate.md
new file mode 100644
index 0000000000..dc9e8f5957
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/cyclical_learning_rate.md
@@ -0,0 +1,35 @@
+
+
+
+
+
+# Module: tfa.optimizers.cyclical_learning_rate
+
+
+
+
+
+
+Cyclical Learning Rate Schedule policies for TensorFlow.
+
+
+
+## Classes
+
+[`class CyclicalLearningRate`](../../tfa/optimizers/CyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class ExponentialCyclicalLearningRate`](../../tfa/optimizers/ExponentialCyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class Triangular2CyclicalLearningRate`](../../tfa/optimizers/Triangular2CyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+[`class TriangularCyclicalLearningRate`](../../tfa/optimizers/TriangularCyclicalLearningRate.md): A LearningRateSchedule that uses cyclical schedule.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/extend_with_decoupled_weight_decay.md b/docs/api_docs/python/tfa/optimizers/extend_with_decoupled_weight_decay.md
new file mode 100644
index 0000000000..7fadcd9a83
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/extend_with_decoupled_weight_decay.md
@@ -0,0 +1,94 @@
+
+
+
+
+
+# tfa.optimizers.extend_with_decoupled_weight_decay
+
+
+
+
+
+
+
+
+Factory function returning an optimizer class with decoupled weight
+
+**Aliases**: `tfa.optimizers.weight_decay_optimizers.extend_with_decoupled_weight_decay`
+
+``` python
+tfa.optimizers.extend_with_decoupled_weight_decay(base_optimizer)
+```
+
+
+
+
+decay.
+
+Returns an optimizer class. An instance of the returned class computes the
+update step of `base_optimizer` and additionally decays the weights.
+E.g., the class returned by
+`extend_with_decoupled_weight_decay(tf.keras.optimizers.Adam)` is
+equivalent to tfa.optimizers.AdamW.
+
+
+The API of the new optimizer class slightly differs from the API of the
+base optimizer:
+- The first argument to the constructor is the weight decay rate.
+- `minimize` and `apply_gradients` accept the optional keyword argument
+ `decay_var_list`, which specifies the variables that should be decayed.
+ If `None`, all variables that are optimized are decayed.
+
+#### Usage example:
+
+
+```python
+# MyAdamW is a new class
+MyAdamW = extend_with_decoupled_weight_decay(tf.keras.optimizers.Adam)
+# Create a MyAdamW object
+optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
+# update var1, var2 but only decay var1
+optimizer.minimize(loss, var_list=[var1, var2], decay_var_list=[var1])
+```
+Note: this extension decays weights BEFORE applying the update based
+on the gradient, i.e. this extension only has the desired behaviour for
+optimizers which do not depend on the value of 'var' in the update step!
+
+Note: when applying a decay to the learning rate, be sure to manually apply
+the decay to the `weight_decay` as well. For example:
+
+```python
+step = tf.Variable(0, trainable=False)
+schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+ [10000, 15000], [1e-0, 1e-1, 1e-2])
+# lr and wd can be a function or a tensor
+lr = 1e-1 * schedule(step)
+wd = lambda: 1e-4 * schedule(step)
+
+# ...
+
+optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
+```
+
+Note: you might want to register your own custom optimizer using
+`tf.keras.utils.get_custom_objects()`.
+
+#### Args:
+
+
+* `base_optimizer`: An optimizer class that inherits from
+ tf.optimizers.Optimizer.
+
+
+#### Returns:
+
+A new optimizer class that inherits from DecoupledWeightDecayExtension
+and base_optimizer.
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/lamb.md b/docs/api_docs/python/tfa/optimizers/lamb.md
new file mode 100644
index 0000000000..760ebf0db5
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/lamb.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.optimizers.lamb
+
+
+
+
+
+
+Layer-wise Adaptive Moments (LAMB) optimizer.
+
+
+See paper [Large Batch Optimization for Deep Learning: Training BERT in
+76 minutes](https://arxiv.org/abs/1904.00962).
+
+## Classes
+
+[`class LAMB`](../../tfa/optimizers/LAMB.md): Optimizer that implements the Layer-wise Adaptive Moments (LAMB).
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/lazy_adam.md b/docs/api_docs/python/tfa/optimizers/lazy_adam.md
new file mode 100644
index 0000000000..4e26e8d7e9
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/lazy_adam.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.optimizers.lazy_adam
+
+
+
+
+
+
+Variant of the Adam optimizer that handles sparse updates more efficiently.
+
+
+Compared with the original Adam optimizer, the one in this file can
+provide a large improvement in model training throughput for some
+applications. However, it provides slightly different semantics than the
+original Adam algorithm, and may lead to different empirical results.
+
+## Classes
+
+[`class LazyAdam`](../../tfa/optimizers/LazyAdam.md): Variant of the Adam optimizer that handles sparse updates more
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/lookahead.md b/docs/api_docs/python/tfa/optimizers/lookahead.md
new file mode 100644
index 0000000000..983e3ecf5b
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/lookahead.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.optimizers.lookahead
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class Lookahead`](../../tfa/optimizers/Lookahead.md): This class allows to extend optimizers with the lookahead mechanism.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/moving_average.md b/docs/api_docs/python/tfa/optimizers/moving_average.md
new file mode 100644
index 0000000000..1bf4bebcf2
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/moving_average.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.optimizers.moving_average
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class MovingAverage`](../../tfa/optimizers/MovingAverage.md): Optimizer that computes a moving average of the variables.
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/rectified_adam.md b/docs/api_docs/python/tfa/optimizers/rectified_adam.md
new file mode 100644
index 0000000000..311097ac49
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/rectified_adam.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.optimizers.rectified_adam
+
+
+
+
+
+
+Rectified Adam (RAdam) optimizer.
+
+
+
+## Classes
+
+[`class RectifiedAdam`](../../tfa/optimizers/RectifiedAdam.md): Variant of the Adam optimizer whose adaptive learning rate is rectified
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/stochastic_weight_averaging.md b/docs/api_docs/python/tfa/optimizers/stochastic_weight_averaging.md
new file mode 100644
index 0000000000..c05799442b
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/stochastic_weight_averaging.md
@@ -0,0 +1,36 @@
+
+
+
+
+
+# Module: tfa.optimizers.stochastic_weight_averaging
+
+
+
+
+
+
+An implementation of the Stochastic Weight Averaging optimizer.
+
+
+The Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov
+et al. in the paper [Averaging Weights Leads to Wider Optima and Better
+Generalization](https://arxiv.org/abs/1803.05407). The optimizer
+implements averaging of multiple points along the trajectory of SGD.
+This averaging has shown to improve model performance on validation/test
+sets whilst possibly causing a small increase in loss on the training
+set.
+
+## Classes
+
+[`class SWA`](../../tfa/optimizers/SWA.md): This class extends optimizers with Stochastic Weight Averaging (SWA).
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers.md b/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers.md
new file mode 100644
index 0000000000..cf41b57238
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers.md
@@ -0,0 +1,37 @@
+
+
+
+
+
+# Module: tfa.optimizers.weight_decay_optimizers
+
+
+
+
+
+
+Base class to make optimizers weight decay ready.
+
+
+
+## Classes
+
+[`class AdamW`](../../tfa/optimizers/AdamW.md): Optimizer that implements the Adam algorithm with weight decay.
+
+[`class DecoupledWeightDecayExtension`](../../tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md): This class allows to extend optimizers with decoupled weight decay.
+
+[`class SGDW`](../../tfa/optimizers/SGDW.md): Optimizer that implements the Momentum algorithm with weight_decay.
+
+## Functions
+
+[`extend_with_decoupled_weight_decay(...)`](../../tfa/optimizers/extend_with_decoupled_weight_decay.md): Factory function returning an optimizer class with decoupled weight
+
+
+
diff --git a/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md b/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md
new file mode 100644
index 0000000000..93def88db5
--- /dev/null
+++ b/docs/api_docs/python/tfa/optimizers/weight_decay_optimizers/DecoupledWeightDecayExtension.md
@@ -0,0 +1,206 @@
+
+
+
+
+
+
+
+
+
+# tfa.optimizers.weight_decay_optimizers.DecoupledWeightDecayExtension
+
+
+
+
+
+
+
+
+## Class `DecoupledWeightDecayExtension`
+
+This class allows extending optimizers with decoupled weight decay.
+
+
+
+
+
+It implements the decoupled weight decay described by Loshchilov & Hutter
+(https://arxiv.org/pdf/1711.05101.pdf), in which the weight decay is
+decoupled from the optimization steps w.r.t. to the loss function.
+For SGD variants, this simplifies hyperparameter search since it decouples
+the settings of weight decay and learning rate.
+For adaptive gradient algorithms, it regularizes variables with large
+gradients more than L2 regularization would, which was shown to yield
+better training loss and generalization error in the paper above.
+
+This class alone is not an optimizer but rather extends existing
+optimizers with decoupled weight decay. We explicitly define the two
+examples used in the above paper (SGDW and AdamW), but in general this
+can extend any OptimizerX by using
+`extend_with_decoupled_weight_decay(
+ OptimizerX, weight_decay=weight_decay)`.
+In order for it to work, it must be the first class the Optimizer with
+weight decay inherits from, e.g.
+
+```python
+class AdamW(DecoupledWeightDecayExtension, tf.keras.optimizers.Adam):
+ def __init__(self, weight_decay, *args, **kwargs):
+    super(AdamW, self).__init__(weight_decay, *args, **kwargs)
+```
+
+Note: this extension decays weights BEFORE applying the update based
+on the gradient, i.e. this extension only has the desired behaviour for
+optimizers which do not depend on the value of 'var' in the update step!
+
+Note: when applying a decay to the learning rate, be sure to manually apply
+the decay to the `weight_decay` as well. For example:
+
+```python
+step = tf.Variable(0, trainable=False)
+schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+ [10000, 15000], [1e-0, 1e-1, 1e-2])
+# lr and wd can be a function or a tensor
+lr = 1e-1 * schedule(step)
+wd = lambda: 1e-4 * schedule(step)
+
+# ...
+
+optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
+```
+
+__init__
+
+View source
+
+``` python
+__init__(
+ weight_decay,
+ **kwargs
+)
+```
+
+Extension class that adds weight decay to an optimizer.
+
+
+#### Args:
+
+
+* `weight_decay`: A `Tensor` or a floating point value, the factor by
+ which a variable is decayed in the update step.
+* `**kwargs`: Optional list or tuple or set of `Variable` objects to
+ decay.
+
+
+
+## Methods
+
+apply_gradients
+
+View source
+
+``` python
+apply_gradients(
+ grads_and_vars,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Apply gradients to variables.
+
+This is the second part of `minimize()`. It returns an `Operation` that
+applies gradients.
+
+#### Args:
+
+
+* `grads_and_vars`: List of (gradient, variable) pairs.
+* `name`: Optional name for the returned operation. Default to the
+ name passed to the `Optimizer` constructor.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+
+#### Returns:
+
+An `Operation` that applies the specified gradients. If
+`global_step` was not None, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `TypeError`: If `grads_and_vars` is malformed.
+* `ValueError`: If none of the variables have gradients.
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+
+
+
+minimize
+
+View source
+
+``` python
+minimize(
+ loss,
+ var_list,
+ grad_loss=None,
+ name=None,
+ decay_var_list=None
+)
+```
+
+Minimize `loss` by updating `var_list`.
+
+This method simply computes gradient using `tf.GradientTape` and calls
+`apply_gradients()`. If you want to process the gradient before
+applying then call `tf.GradientTape` and `apply_gradients()` explicitly
+instead of using this function.
+
+#### Args:
+
+
+* `loss`: A callable taking no arguments which returns the value to
+ minimize.
+* `var_list`: list or tuple of `Variable` objects to update to
+ minimize `loss`, or a callable returning the list or tuple of
+ `Variable` objects. Use callable when the variable list would
+ otherwise be incomplete before `minimize` since the variables
+ are created at the first time `loss` is called.
+* `grad_loss`: Optional. A `Tensor` holding the gradient computed for
+ `loss`.
+* `decay_var_list`: Optional list of variables to be decayed. Defaults
+ to all variables in var_list.
+* `name`: Optional name for the returned operation.
+
+#### Returns:
+
+An Operation that updates the variables in `var_list`. If
+`global_step` was not `None`, that operation also increments
+`global_step`.
+
+
+#### Raises:
+
+
+* `ValueError`: If some of the variables are not `Variable` objects.
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/rnn.md b/docs/api_docs/python/tfa/rnn.md
new file mode 100644
index 0000000000..82e600503f
--- /dev/null
+++ b/docs/api_docs/python/tfa/rnn.md
@@ -0,0 +1,35 @@
+
+
+
+
+
+# Module: tfa.rnn
+
+
+
+
+
+
+Additional RNN cells that conform to Keras API.
+
+
+
+## Modules
+
+[`cell`](../tfa/rnn/cell.md) module: Module for RNN Cells.
+
+## Classes
+
+[`class LayerNormLSTMCell`](../tfa/rnn/LayerNormLSTMCell.md): LSTM cell with layer normalization and recurrent dropout.
+
+[`class NASCell`](../tfa/rnn/NASCell.md): Neural Architecture Search (NAS) recurrent network cell.
+
+
+
diff --git a/docs/api_docs/python/tfa/rnn/LayerNormLSTMCell.md b/docs/api_docs/python/tfa/rnn/LayerNormLSTMCell.md
new file mode 100644
index 0000000000..ab78ed9715
--- /dev/null
+++ b/docs/api_docs/python/tfa/rnn/LayerNormLSTMCell.md
@@ -0,0 +1,48 @@
+
+
+
+
+
+# tfa.rnn.LayerNormLSTMCell
+
+
+
+
+
+
+
+
+## Class `LayerNormLSTMCell`
+
+LSTM cell with layer normalization and recurrent dropout.
+
+
+
+**Aliases**: `tfa.rnn.cell.LayerNormLSTMCell`
+
+
+
+This class adds layer normalization and recurrent dropout to an LSTM unit.
+Layer normalization implementation is based on:
+
+ https://arxiv.org/abs/1607.06450.
+
+"Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
+
+and is applied before the internal nonlinearities.
+Recurrent dropout is based on:
+
+ https://arxiv.org/abs/1603.05118
+
+"Recurrent Dropout without Memory Loss"
+Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
+
+
+
diff --git a/docs/api_docs/python/tfa/rnn/NASCell.md b/docs/api_docs/python/tfa/rnn/NASCell.md
new file mode 100644
index 0000000000..f93d34c0c6
--- /dev/null
+++ b/docs/api_docs/python/tfa/rnn/NASCell.md
@@ -0,0 +1,891 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.rnn.NASCell
+
+
+
+
+
+
+
+
+## Class `NASCell`
+
+Neural Architecture Search (NAS) recurrent network cell.
+
+
+
+**Aliases**: `tfa.rnn.cell.NASCell`
+
+
+
+This implements the recurrent cell from the paper:
+
+ https://arxiv.org/abs/1611.01578
+
+Barret Zoph and Quoc V. Le.
+"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
+
+The class uses an optional projection layer.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ units,
+ projection=None,
+ use_bias=False,
+ kernel_initializer='glorot_uniform',
+ recurrent_initializer='glorot_uniform',
+ projection_initializer='glorot_uniform',
+ bias_initializer='zeros',
+ **kwargs
+)
+```
+
+Initialize the parameters for a NAS cell.
+
+
+#### Args:
+
+
+* `units`: int, The number of units in the NAS cell.
+* `projection`: (optional) int, The output dimensionality for the
+ projection matrices. If None, no projection is performed.
+* `use_bias`: (optional) bool, If True then use biases within the cell.
+ This is False by default.
+* `kernel_initializer`: Initializer for kernel weight.
+* `recurrent_initializer`: Initializer for recurrent kernel weight.
+* `projection_initializer`: Initializer for projection weight, used when
+ projection is not None.
+* `bias_initializer`: Initializer for bias, used when use_bias is True.
+* `**kwargs`: Additional keyword arguments.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+Integer or TensorShape: size of outputs produced by this cell.
+
+
+state_size
+
+size(s) of state(s) used by this cell.
+
+It can be represented by an Integer, a TensorShape or a tuple of Integers
+or TensorShapes.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+ for `inputs` by the previous layer (if `input` did come from
+ a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(inputs_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+get_initial_state
+
+``` python
+get_initial_state(
+ inputs=None,
+ batch_size=None,
+ dtype=None
+)
+```
+
+
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/rnn/cell.md b/docs/api_docs/python/tfa/rnn/cell.md
new file mode 100644
index 0000000000..b59d2bfa31
--- /dev/null
+++ b/docs/api_docs/python/tfa/rnn/cell.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.rnn.cell
+
+
+
+
+
+
+Module for RNN Cells.
+
+
+
+## Classes
+
+[`class LayerNormLSTMCell`](../../tfa/rnn/LayerNormLSTMCell.md): LSTM cell with layer normalization and recurrent dropout.
+
+[`class NASCell`](../../tfa/rnn/NASCell.md): Neural Architecture Search (NAS) recurrent network cell.
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq.md b/docs/api_docs/python/tfa/seq2seq.md
new file mode 100644
index 0000000000..6e7a4595b6
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq.md
@@ -0,0 +1,105 @@
+
+
+
+
+
+# Module: tfa.seq2seq
+
+
+
+
+
+
+Additional ops for building neural network sequence to sequence decoders and
+
+losses.
+
+## Modules
+
+[`attention_wrapper`](../tfa/seq2seq/attention_wrapper.md) module: A powerful dynamic attention wrapper object.
+
+[`basic_decoder`](../tfa/seq2seq/basic_decoder.md) module: A class of Decoders that may sample to generate the next input.
+
+[`beam_search_decoder`](../tfa/seq2seq/beam_search_decoder.md) module: A decoder that performs beam search.
+
+[`decoder`](../tfa/seq2seq/decoder.md) module: Seq2seq layer operations for use in neural networks.
+
+[`loss`](../tfa/seq2seq/loss.md) module: Seq2seq loss operations for use in sequence models.
+
+[`sampler`](../tfa/seq2seq/sampler.md) module: A library of sampler for use with SamplingDecoders.
+
+## Classes
+
+[`class AttentionMechanism`](../tfa/seq2seq/AttentionMechanism.md)
+
+[`class AttentionWrapper`](../tfa/seq2seq/AttentionWrapper.md): Wraps another `RNNCell` with attention.
+
+[`class AttentionWrapperState`](../tfa/seq2seq/AttentionWrapperState.md): `namedtuple` storing the state of a `AttentionWrapper`.
+
+[`class BahdanauAttention`](../tfa/seq2seq/BahdanauAttention.md): Implements Bahdanau-style (additive) attention.
+
+[`class BahdanauMonotonicAttention`](../tfa/seq2seq/BahdanauMonotonicAttention.md): Monotonic attention mechanism with Bahdanau-style energy function.
+
+[`class BaseDecoder`](../tfa/seq2seq/BaseDecoder.md): An RNN Decoder that is based on a Keras layer.
+
+[`class BasicDecoder`](../tfa/seq2seq/BasicDecoder.md): Basic sampling decoder.
+
+[`class BasicDecoderOutput`](../tfa/seq2seq/BasicDecoderOutput.md): BasicDecoderOutput(rnn_output, sample_id)
+
+[`class BeamSearchDecoder`](../tfa/seq2seq/BeamSearchDecoder.md): BeamSearch sampling decoder.
+
+[`class BeamSearchDecoderOutput`](../tfa/seq2seq/BeamSearchDecoderOutput.md): BeamSearchDecoderOutput(scores, predicted_ids, parent_ids)
+
+[`class BeamSearchDecoderState`](../tfa/seq2seq/BeamSearchDecoderState.md): BeamSearchDecoderState(cell_state, log_probs, finished, lengths, accumulated_attention_probs)
+
+[`class CustomSampler`](../tfa/seq2seq/CustomSampler.md): Base abstract class that allows the user to customize sampling.
+
+[`class Decoder`](../tfa/seq2seq/Decoder.md): An RNN Decoder abstract interface object.
+
+[`class FinalBeamSearchDecoderOutput`](../tfa/seq2seq/FinalBeamSearchDecoderOutput.md): Final outputs returned by the beam search after all decoding is
+
+[`class GreedyEmbeddingSampler`](../tfa/seq2seq/GreedyEmbeddingSampler.md): A sampler for use during inference.
+
+[`class InferenceSampler`](../tfa/seq2seq/InferenceSampler.md): A helper to use during inference with a custom sampling function.
+
+[`class LuongAttention`](../tfa/seq2seq/LuongAttention.md): Implements Luong-style (multiplicative) attention scoring.
+
+[`class LuongMonotonicAttention`](../tfa/seq2seq/LuongMonotonicAttention.md): Monotonic attention mechanism with Luong-style energy function.
+
+[`class SampleEmbeddingSampler`](../tfa/seq2seq/SampleEmbeddingSampler.md): A sampler for use during inference.
+
+[`class Sampler`](../tfa/seq2seq/Sampler.md): Interface for implementing sampling in seq2seq decoders.
+
+[`class ScheduledEmbeddingTrainingSampler`](../tfa/seq2seq/ScheduledEmbeddingTrainingSampler.md): A training sampler that adds scheduled sampling.
+
+[`class ScheduledOutputTrainingSampler`](../tfa/seq2seq/ScheduledOutputTrainingSampler.md): A training sampler that adds scheduled sampling directly to outputs.
+
+[`class SequenceLoss`](../tfa/seq2seq/SequenceLoss.md): Weighted cross-entropy loss for a sequence of logits.
+
+[`class TrainingSampler`](../tfa/seq2seq/TrainingSampler.md): A Sampler for use during training.
+
+## Functions
+
+[`dynamic_decode(...)`](../tfa/seq2seq/dynamic_decode.md): Perform dynamic decoding with `decoder`.
+
+[`gather_tree_from_array(...)`](../tfa/seq2seq/gather_tree_from_array.md): Calculates the full beams for `TensorArray`s.
+
+[`hardmax(...)`](../tfa/seq2seq/hardmax.md): Returns batched one-hot vectors.
+
+[`monotonic_attention(...)`](../tfa/seq2seq/monotonic_attention.md): Compute monotonic attention distribution from choosing probabilities.
+
+[`safe_cumprod(...)`](../tfa/seq2seq/safe_cumprod.md): Computes cumprod of x in logspace using cumsum to avoid underflow.
+
+[`sequence_loss(...)`](../tfa/seq2seq/sequence_loss.md): Weighted cross-entropy loss for a sequence of logits.
+
+[`tile_batch(...)`](../tfa/seq2seq/tile_batch.md): Tile the batch dimension of a (possibly nested structure of) tensor(s)
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/AttentionMechanism.md b/docs/api_docs/python/tfa/seq2seq/AttentionMechanism.md
new file mode 100644
index 0000000000..acfd8250de
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/AttentionMechanism.md
@@ -0,0 +1,50 @@
+
+
+
+
+
+
+
+# tfa.seq2seq.AttentionMechanism
+
+
+
+
+
+
+
+
+## Class `AttentionMechanism`
+
+
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.AttentionMechanism`
+
+
+
+
+## Properties
+
+alignments_size
+
+
+
+
+state_size
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/AttentionWrapper.md b/docs/api_docs/python/tfa/seq2seq/AttentionWrapper.md
new file mode 100644
index 0000000000..6325baccb3
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/AttentionWrapper.md
@@ -0,0 +1,993 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.AttentionWrapper
+
+
+
+
+
+
+
+
+## Class `AttentionWrapper`
+
+Wraps another `RNNCell` with attention.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.AttentionWrapper`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ cell,
+ attention_mechanism,
+ attention_layer_size=None,
+ alignment_history=False,
+ cell_input_fn=None,
+ output_attention=True,
+ initial_cell_state=None,
+ name=None,
+ attention_layer=None,
+ attention_fn=None,
+ **kwargs
+)
+```
+
+Construct the `AttentionWrapper`.
+
+**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped
+in `AttentionWrapper`, then you must ensure that:
+
+- The encoder output has been tiled to `beam_width` via
+ tfa.seq2seq.tile_batch
+ (NOT `tf.tile`).
+- The `batch_size` argument passed to the `get_initial_state` method of
+ this wrapper is equal to `true_batch_size * beam_width`.
+- The initial state created with `get_initial_state` above contains a
+ `cell_state` value containing properly tiled final state from the
+ encoder.
+
+#### An example:
+
+
+
+```
+tiled_encoder_outputs = tfa.seq2seq.tile_batch(
+ encoder_outputs, multiplier=beam_width)
+tiled_encoder_final_state = tfa.seq2seq.tile_batch(
+ encoder_final_state, multiplier=beam_width)
+tiled_sequence_length = tfa.seq2seq.tile_batch(
+ sequence_length, multiplier=beam_width)
+attention_mechanism = MyFavoriteAttentionMechanism(
+ num_units=attention_depth,
+ memory=tiled_inputs,
+ memory_sequence_length=tiled_sequence_length)
+attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
+decoder_initial_state = attention_cell.get_initial_state(
+ batch_size=true_batch_size * beam_width, dtype=dtype)
+decoder_initial_state = decoder_initial_state.clone(
+ cell_state=tiled_encoder_final_state)
+```
+
+#### Args:
+
+
+* `cell`: An instance of `RNNCell`.
+* `attention_mechanism`: A list of `AttentionMechanism` instances or a
+ single instance.
+* `attention_layer_size`: A list of Python integers or a single Python
+ integer, the depth of the attention (output) layer(s). If None
+ (default), use the context as attention at each time step.
+ Otherwise, feed the context and cell output into the attention
+ layer to generate attention at each time step. If
+ attention_mechanism is a list, attention_layer_size must be a list
+ of the same length. If attention_layer is set, this must be None.
+ If attention_fn is set, it must be guaranteed that the outputs of
+ attention_fn also meet the above requirements.
+* `alignment_history`: Python boolean, whether to store alignment history
+ from all time steps in the final output state (currently stored as
+ a time major `TensorArray` on which you must call `stack()`).
+* `cell_input_fn`: (optional) A `callable`. The default is:
+ `lambda inputs, attention:
+ tf.concat([inputs, attention], -1)`.
+* `output_attention`: Python bool. If `True` (default), the output at
+ each time step is the attention value. This is the behavior of
+ Luong-style attention mechanisms. If `False`, the output at each
+ time step is the output of `cell`. This is the behavior of
+ Bahdanau-style attention mechanisms. In both cases, the
+ `attention` tensor is propagated to the next time step via the
+ state and is used there. This flag only controls whether the
+ attention mechanism is propagated up to the next cell in an RNN
+ stack or to the top RNN output.
+* `initial_cell_state`: The initial state value to use for the cell when
+ the user calls `get_initial_state()`. Note that if this value is
+ provided now, and the user uses a `batch_size` argument of
+ `get_initial_state` which does not match the batch size of
+ `initial_cell_state`, proper behavior is not guaranteed.
+* `name`: Name to use when creating ops.
+* `attention_layer`: A list of `tf.keras.layers.Layer` instances or a
+  single `tf.keras.layers.Layer` instance taking the context
+ and cell output as inputs to generate attention at each time step.
+ If None (default), use the context as attention at each time step.
+ If attention_mechanism is a list, attention_layer must be a list of
+ the same length. If attention_layers_size is set, this must be
+ None.
+* `attention_fn`: An optional callable function that allows users to
+ provide their own customized attention function, which takes input
+ (attention_mechanism, cell_output, attention_state,
+ attention_layer) and outputs (attention, alignments,
+ next_attention_state). If provided, the attention_layer_size should
+ be the size of the outputs of attention_fn.
+* `**kwargs`: Other keyword arguments for layer creation.
+
+
+#### Raises:
+
+
+* `TypeError`: `attention_layer_size` is not None and
+ (`attention_mechanism` is a list but `attention_layer_size` is not;
+ or vice versa).
+* `ValueError`: if `attention_layer_size` is not None,
+ `attention_mechanism` is a list, and its length does not match that
+ of `attention_layer_size`; if `attention_layer_size` and
+ `attention_layer` are set simultaneously.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+Integer or TensorShape: size of outputs produced by this cell.
+
+
+state_size
+
+The `state_size` property of `AttentionWrapper`.
+
+
+#### Returns:
+
+An `AttentionWrapperState` tuple containing shapes used
+by this object.
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+ for `inputs` by the previous layer (if `input` did come from
+ a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+get_initial_state
+
+View source
+
+``` python
+get_initial_state(
+ inputs=None,
+ batch_size=None,
+ dtype=None
+)
+```
+
+Return an initial (zero) state tuple for this `AttentionWrapper`.
+
+**NOTE** Please see the initializer documentation for details of how
+to call `get_initial_state` if using an `AttentionWrapper` with a
+`BeamSearchDecoder`.
+
+#### Args:
+
+
+* `inputs`: The inputs that will be fed to this cell.
+* `batch_size`: `0D` integer tensor: the batch size.
+* `dtype`: The internal state data type.
+
+
+#### Returns:
+
+An `AttentionWrapperState` tuple containing zeroed out tensors and,
+possibly, empty `TensorArray` objects.
+
+
+
+#### Raises:
+
+
+* `ValueError`: (or, possibly at runtime, InvalidArgument), if
+ `batch_size` does not match the output size of the encoder passed
+ to the wrapper object at initialization time.
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/AttentionWrapperState.md b/docs/api_docs/python/tfa/seq2seq/AttentionWrapperState.md
new file mode 100644
index 0000000000..482a9a98cc
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/AttentionWrapperState.md
@@ -0,0 +1,154 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.AttentionWrapperState
+
+
+
+
+
+
+
+
+## Class `AttentionWrapperState`
+
+`namedtuple` storing the state of an `AttentionWrapper`.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.AttentionWrapperState`
+
+
+
+
+#### Contains:
+
+
+- `cell_state`: The state of the wrapped `RNNCell` at the previous time
+ step.
+- `attention`: The attention emitted at the previous time step.
+- `time`: int32 scalar containing the current time step.
+- `alignments`: A single or tuple of `Tensor`(s) containing the
+ alignments emitted at the previous time step for each attention
+ mechanism.
+- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
+ containing alignment matrices from all time steps for each attention
+ mechanism. Call `stack()` on each to convert to a `Tensor`.
+- `attention_state`: A single or tuple of nested objects
+ containing attention mechanism state for each attention mechanism.
+ The objects may contain Tensors or TensorArrays.
+
+
+__new__
+
+``` python
+__new__(
+ _cls,
+ cell_state,
+ attention,
+ time,
+ alignments,
+ alignment_history,
+ attention_state
+)
+```
+
+Create new instance of AttentionWrapperState(cell_state, attention, time, alignments, alignment_history, attention_state)
+
+
+
+
+## Properties
+
+cell_state
+
+
+
+
+attention
+
+
+
+
+time
+
+
+
+
+alignments
+
+
+
+
+alignment_history
+
+
+
+
+attention_state
+
+
+
+
+
+
+## Methods
+
+clone
+
+View source
+
+``` python
+clone(**kwargs)
+```
+
+Clone this object, overriding components provided by kwargs.
+
+The new state fields' shape must match original state fields' shape.
+This will be validated, and original fields' shape will be propagated
+to new fields.
+
+#### Example:
+
+
+
+```python
+initial_state = attention_wrapper.get_initial_state(
+ batch_size=..., dtype=...)
+initial_state = initial_state.clone(cell_state=encoder_state)
+```
+
+#### Args:
+
+
+* `**kwargs`: Any properties of the state object to replace in the
+ returned `AttentionWrapperState`.
+
+
+#### Returns:
+
+A new `AttentionWrapperState` whose properties are the same as
+this one, except any overridden properties as provided in `kwargs`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BahdanauAttention.md b/docs/api_docs/python/tfa/seq2seq/BahdanauAttention.md
new file mode 100644
index 0000000000..dc7ee84038
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BahdanauAttention.md
@@ -0,0 +1,1021 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BahdanauAttention
+
+
+
+
+
+
+
+
+## Class `BahdanauAttention`
+
+Implements Bahdanau-style (additive) attention.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.BahdanauAttention`
+
+
+
+This attention has two forms. The first is Bahdanau attention,
+as described in:
+
+Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
+"Neural Machine Translation by Jointly Learning to Align and Translate."
+ICLR 2015. https://arxiv.org/abs/1409.0473
+
+The second is the normalized form. This form is inspired by the
+weight normalization article:
+
+Tim Salimans, Diederik P. Kingma.
+"Weight Normalization: A Simple Reparameterization to Accelerate
+ Training of Deep Neural Networks."
+https://arxiv.org/abs/1602.07868
+
+To enable the second form, construct the object with parameter
+`normalize=True`.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ units,
+ memory=None,
+ memory_sequence_length=None,
+ normalize=False,
+ probability_fn='softmax',
+ kernel_initializer='glorot_uniform',
+ dtype=None,
+ name='BahdanauAttention',
+ **kwargs
+)
+```
+
+Construct the Attention mechanism.
+
+
+#### Args:
+
+
+* `units`: The depth of the query mechanism.
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `normalize`: Python boolean. Whether to normalize the energy term.
+* `probability_fn`: (optional) string, the name of function to convert
+ the attention score to probabilities. The default is `softmax`
+  which is `tf.nn.softmax`. Another option is `hardmax`, which is
+  hardmax() within this module. Any other value will result in a
+  validation error. Defaults to `softmax`.
+* `kernel_initializer`: (optional), the name of the initializer for the
+ attention kernel.
+* `dtype`: The data type for the query and memory layers of the attention
+ mechanism.
+* `name`: Name to use when creating ops.
+* `**kwargs`: Dictionary that contains other common arguments for layer
+ creation.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+alignments_size
+
+
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+memory_initialized
+
+Returns `True` if this attention mechanism has been initialized with
+a memory.
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+state_size
+
+
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(
+ inputs,
+ **kwargs
+)
+```
+
+Preprocess the inputs before calling `base_layer.__call__()`.
+
+Note that there are two situations here: one for setting up the memory,
+and one with the actual query and state.
+1. When the memory has not been configured, we just pass all the param
+ to base_layer.__call__(), which will then invoke self.call() with
+ proper inputs, which allows this class to setup memory.
+2. When the memory has already been setup, the input should contain
+ query and state, and optionally processed memory. If the processed
+ memory is not included in the input, we will have to append it to
+ the inputs and give it to the base_layer.__call__(). The processed
+ memory is the output of first invocation of self.__call__(). If we
+ don't add it here, then from keras perspective, the graph is
+ disconnected since the output from previous call is never used.
+
+#### Args:
+
+
+* `inputs`: the inputs tensors.
+* `**kwargs`: dict, other keyword arguments for the `__call__()`.
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+View source
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+deserialize_inner_layer_from_config
+
+View source
+
+``` python
+deserialize_inner_layer_from_config(
+ cls,
+ config,
+ custom_objects
+)
+```
+
+Helper method that reconstruct the query and memory from the config.
+
+In the get_config() method, the query and memory layer configs are
+serialized into dict for persistence, this method perform the reverse
+action to reconstruct the layer from the config.
+
+#### Args:
+
+
+* `config`: dict, the configs that will be used to reconstruct the
+ object.
+* `custom_objects`: dict mapping class names (or function names) of
+ custom (non-Keras) objects to class/functions.
+
+#### Returns:
+
+
+* `config`: dict, the config with layer instance created, which is ready
+ to be used as init parameters.
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initial_alignments
+
+View source
+
+``` python
+initial_alignments(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial alignment values for the `AttentionWrapper`
+class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return a tensor of all zeros.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A `dtype` tensor shaped `[batch_size, alignments_size]`
+(`alignments_size` is the values' `max_time`).
+
+
+initial_state
+
+View source
+
+``` python
+initial_state(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial state values for the `AttentionWrapper` class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return the same output as
+initial_alignments.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A structure of all-zero tensors with shapes as described by
+`state_size`.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+setup_memory
+
+View source
+
+``` python
+setup_memory(
+ memory,
+ memory_sequence_length=None,
+ memory_mask=None
+)
+```
+
+Pre-process the memory before actually querying the memory.
+
+This should only be called once at the first invocation of call().
+
+#### Args:
+
+
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+memory_sequence_length (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `memory_mask`: (Optional) The boolean tensor with shape `[batch_size,
+ max_time]`. For any value equal to False, the corresponding value
+ in memory should be ignored.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BahdanauMonotonicAttention.md b/docs/api_docs/python/tfa/seq2seq/BahdanauMonotonicAttention.md
new file mode 100644
index 0000000000..65b9015c72
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BahdanauMonotonicAttention.md
@@ -0,0 +1,1019 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BahdanauMonotonicAttention
+
+
+
+
+
+
+
+
+## Class `BahdanauMonotonicAttention`
+
+Monotonic attention mechanism with Bahdanau-style energy function.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.BahdanauMonotonicAttention`
+
+
+
+This type of attention enforces a monotonic constraint on the attention
+distributions; that is once the model attends to a given point in the
+memory it can't attend to any prior points at subsequent output timesteps.
+It achieves this by using the _monotonic_probability_fn instead of softmax
+to construct its attention distributions. Since the attention scores are
+passed through a sigmoid, a learnable scalar bias parameter is applied
+after the score function and before the sigmoid. Otherwise, it is
+equivalent to BahdanauAttention. This approach is proposed in
+
+Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
+"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
+ICML 2017. https://arxiv.org/abs/1704.00784
+
+__init__
+
+View source
+
+``` python
+__init__(
+ units,
+ memory=None,
+ memory_sequence_length=None,
+ normalize=False,
+ sigmoid_noise=0.0,
+ sigmoid_noise_seed=None,
+ score_bias_init=0.0,
+ mode='parallel',
+ kernel_initializer='glorot_uniform',
+ dtype=None,
+ name='BahdanauMonotonicAttention',
+ **kwargs
+)
+```
+
+Construct the Attention mechanism.
+
+
+#### Args:
+
+
+* `units`: The depth of the query mechanism.
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `normalize`: Python boolean. Whether to normalize the energy term.
+* `sigmoid_noise`: Standard deviation of pre-sigmoid noise. See the
+ docstring for `_monotonic_probability_fn` for more information.
+* `sigmoid_noise_seed`: (optional) Random seed for pre-sigmoid noise.
+* `score_bias_init`: Initial value for score bias scalar. It's
+ recommended to initialize this to a negative value when the length
+ of the memory is large.
+* `mode`: How to compute the attention distribution. Must be one of
+ 'recursive', 'parallel', or 'hard'. See the docstring for
+ tfa.seq2seq.monotonic_attention
+  for more information.
+* `kernel_initializer`: (optional), the name of the initializer for the
+ attention kernel.
+* `dtype`: The data type for the query and memory layers of the attention
+ mechanism.
+* `name`: Name to use when creating ops.
+* `**kwargs`: Dictionary that contains other common arguments for layer
+ creation.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+alignments_size
+
+
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+memory_initialized
+
+Returns `True` if this attention mechanism has been initialized with
+a memory.
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+state_size
+
+
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(
+ inputs,
+ **kwargs
+)
+```
+
+Preprocess the inputs before calling `base_layer.__call__()`.
+
+Note that there are two situations here: one for setting up the memory,
+and one with the actual query and state.
+1. When the memory has not been configured, we just pass all the param
+ to base_layer.__call__(), which will then invoke self.call() with
+ proper inputs, which allows this class to setup memory.
+2. When the memory has already been setup, the input should contain
+ query and state, and optionally processed memory. If the processed
+ memory is not included in the input, we will have to append it to
+ the inputs and give it to the base_layer.__call__(). The processed
+ memory is the output of first invocation of self.__call__(). If we
+ don't add it here, then from keras perspective, the graph is
+ disconnected since the output from previous call is never used.
+
+#### Args:
+
+
+* `inputs`: the inputs tensors.
+* `**kwargs`: dict, other keyword arguments for the `__call__()`
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+View source
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+deserialize_inner_layer_from_config
+
+View source
+
+``` python
+deserialize_inner_layer_from_config(
+ cls,
+ config,
+ custom_objects
+)
+```
+
+Helper method that reconstructs the query and memory from the config.
+
+In the get_config() method, the query and memory layer configs are
+serialized into a dict for persistence; this method performs the reverse
+action to reconstruct the layer from the config.
+
+#### Args:
+
+
+* `config`: dict, the configs that will be used to reconstruct the
+ object.
+* `custom_objects`: dict mapping class names (or function names) of
+ custom (non-Keras) objects to class/functions.
+
+#### Returns:
+
+
+* `config`: dict, the config with layer instance created, which is ready
+ to be used as init parameters.
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initial_alignments
+
+View source
+
+``` python
+initial_alignments(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial alignment values for the monotonic attentions.
+
+Initializes to dirac distributions, i.e.
+[1, 0, 0, ...memory length..., 0] for all entries in the batch.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A `dtype` tensor shaped `[batch_size, alignments_size]`
+(`alignments_size` is the values' `max_time`).
+
+
+initial_state
+
+View source
+
+``` python
+initial_state(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial state values for the `AttentionWrapper` class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return the same output as
+initial_alignments.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A structure of all-zero tensors with shapes as described by
+`state_size`.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+setup_memory
+
+View source
+
+``` python
+setup_memory(
+ memory,
+ memory_sequence_length=None,
+ memory_mask=None
+)
+```
+
+Pre-process the memory before actually querying the memory.
+
+This should only be called once at the first invocation of call().
+
+#### Args:
+
+
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+memory_sequence_length (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `memory_mask`: (Optional) The boolean tensor with shape `[batch_size,
+ max_time]`. For any value equal to False, the corresponding value
+ in memory should be ignored.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BaseDecoder.md b/docs/api_docs/python/tfa/seq2seq/BaseDecoder.md
new file mode 100644
index 0000000000..b585d02747
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BaseDecoder.md
@@ -0,0 +1,977 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BaseDecoder
+
+
+
+
+
+
+
+
+## Class `BaseDecoder`
+
+An RNN Decoder that is based on a Keras layer.
+
+
+
+**Aliases**: `tfa.seq2seq.decoder.BaseDecoder`
+
+
+
+Concepts used by this interface:
+- `inputs`: (structure of) tensors and TensorArrays that is passed as input
+ to the RNNCell composing the decoder, at each time step.
+- `state`: (structure of) tensors and TensorArrays that is passed to the
+ RNNCell instance as the state.
+- `memory`: (structure of) tensors that is usually the full output of the
+ encoder, which will be used for the attention wrapper for the RNNCell.
+- `finished`: boolean tensor telling whether each sequence in the batch is
+ finished.
+- `training`: boolean whether it should behave in training mode or in
+ inference mode.
+- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at
+ each time step.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ output_time_major=False,
+ impute_finished=False,
+ maximum_iterations=None,
+ parallel_iterations=32,
+ swap_memory=False,
+ **kwargs
+)
+```
+
+
+
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+batch_size
+
+The batch size of input values.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_dtype
+
+A (possibly nested tuple of...) dtype[s].
+
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+A (possibly nested tuple of...) integer[s] or `TensorShape`
+object[s].
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+tracks_own_finished
+
+Describes whether the Decoder keeps track of finished states.
+
+Most decoders will emit a true/false `finished` value independently
+at each time step. In this case, the `dynamic_decode` function keeps
+track of which batch entries are already finished, and performs a
+logical OR to insert new batches to the finished set.
+
+Some decoders, however, shuffle batches / beams between time steps and
+`dynamic_decode` will mix up the finished state across these entries
+because it does not track the reshuffle across time steps. In this
+case, it is up to the decoder to declare that it will keep track of its
+own finished state by setting this property to `True`.
+
+#### Returns:
+
+Python bool.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+finalize
+
+View source
+
+``` python
+finalize(
+ outputs,
+ final_state,
+ sequence_lengths
+)
+```
+
+
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ initial_state=None,
+ **kwargs
+)
+```
+
+Called before any decoding iterations.
+
+This method must compute initial input values and initial state.
+
+#### Args:
+
+
+* `inputs`: (structure of) tensors that contains the input for the
+ decoder. In the normal case, it's a tensor with shape
+ [batch, timestep, embedding].
+* `initial_state`: (structure of) tensors that contains the initial state
+ for the RNNCell.
+* `**kwargs`: Other arguments that are passed in from layer.call()
+  method. It could contain items like input sequence_length, or
+ masking for input.
+
+
+#### Returns:
+
+`(finished, initial_inputs, initial_state)`: initial values of
+'finished' flags, inputs and state.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+step
+
+View source
+
+``` python
+step(
+ time,
+ inputs,
+ state,
+ training
+)
+```
+
+Called per step of decoding (but only once for dynamic decoding).
+
+
+#### Args:
+
+
+* `time`: Scalar `int32` tensor. Current step number.
+* `inputs`: RNNCell input (possibly nested tuple of) tensor[s] for this
+ time step.
+* `state`: RNNCell state (possibly nested tuple of) tensor[s] from
+ previous time step.
+* `training`: Python boolean. Indicates whether the layer should
+ behave in training mode or in inference mode.
+
+
+#### Returns:
+
+`(outputs, next_state, next_inputs, finished)`: `outputs` is an
+object containing the decoder output, `next_state` is a
+(structure of) state tensors and TensorArrays, `next_inputs` is the
+tensor that should be used as input for the next step, `finished` is
+a boolean tensor telling whether the sequence is complete, for each
+sequence in the batch.
+
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BasicDecoder.md b/docs/api_docs/python/tfa/seq2seq/BasicDecoder.md
new file mode 100644
index 0000000000..e4efd2c479
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BasicDecoder.md
@@ -0,0 +1,951 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BasicDecoder
+
+
+
+
+
+
+
+
+## Class `BasicDecoder`
+
+Basic sampling decoder.
+
+Inherits From: [`BaseDecoder`](../../tfa/seq2seq/BaseDecoder.md)
+
+**Aliases**: `tfa.seq2seq.basic_decoder.BasicDecoder`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ cell,
+ sampler,
+ output_layer=None,
+ **kwargs
+)
+```
+
+Initialize BasicDecoder.
+
+
+#### Args:
+
+
+* `cell`: An `RNNCell` instance.
+* `sampler`: A `Sampler` instance.
+* `output_layer`: (Optional) An instance of `tf.layers.Layer`, i.e.,
+ `tf.layers.Dense`. Optional layer to apply to the RNN output prior
+ to storing the result or sampling.
+* `**kwargs`: Other keyword arguments for layer creation.
+
+
+#### Raises:
+
+
+* `TypeError`: if `cell`, `helper` or `output_layer` have an incorrect
+type.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+batch_size
+
+The batch size of input values.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_dtype
+
+A (possibly nested tuple of...) dtype[s].
+
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+A (possibly nested tuple of...) integer[s] or `TensorShape`
+object[s].
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+tracks_own_finished
+
+Describes whether the Decoder keeps track of finished states.
+
+Most decoders will emit a true/false `finished` value independently
+at each time step. In this case, the `dynamic_decode` function keeps
+track of which batch entries are already finished, and performs a
+logical OR to insert new batches to the finished set.
+
+Some decoders, however, shuffle batches / beams between time steps and
+`dynamic_decode` will mix up the finished state across these entries
+because it does not track the reshuffle across time steps. In this
+case, it is up to the decoder to declare that it will keep track of its
+own finished state by setting this property to `True`.
+
+#### Returns:
+
+Python bool.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `inputs` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+finalize
+
+View source
+
+``` python
+finalize(
+ outputs,
+ final_state,
+ sequence_lengths
+)
+```
+
+
+
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ initial_state=None,
+ **kwargs
+)
+```
+
+Initialize the decoder.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+step
+
+View source
+
+``` python
+step(
+ time,
+ inputs,
+ state,
+ training=None
+)
+```
+
+Perform a decoding step.
+
+
+#### Args:
+
+
+* `time`: scalar `int32` tensor.
+* `inputs`: A (structure of) input tensors.
+* `state`: A (structure of) state tensors and TensorArrays.
+* `training`: Python boolean.
+
+
+#### Returns:
+
+`(outputs, next_state, next_inputs, finished)`.
+
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BasicDecoderOutput.md b/docs/api_docs/python/tfa/seq2seq/BasicDecoderOutput.md
new file mode 100644
index 0000000000..48493bf4a8
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BasicDecoderOutput.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BasicDecoderOutput
+
+
+
+
+
+
+
+
+## Class `BasicDecoderOutput`
+
+BasicDecoderOutput(rnn_output, sample_id)
+
+
+
+**Aliases**: `tfa.seq2seq.basic_decoder.BasicDecoderOutput`
+
+
+
+
+__new__
+
+``` python
+__new__(
+ _cls,
+ rnn_output,
+ sample_id
+)
+```
+
+Create new instance of BasicDecoderOutput(rnn_output, sample_id)
+
+
+
+
+## Properties
+
+rnn_output
+
+
+
+
+sample_id
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoder.md b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoder.md
new file mode 100644
index 0000000000..4e55320140
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoder.md
@@ -0,0 +1,1042 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BeamSearchDecoder
+
+
+
+
+
+
+
+
+## Class `BeamSearchDecoder`
+
+BeamSearch sampling decoder.
+
+Inherits From: [`BeamSearchDecoderMixin`](../../tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin.md), [`BaseDecoder`](../../tfa/seq2seq/BaseDecoder.md)
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.BeamSearchDecoder`
+
+
+
+**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
+`AttentionWrapper`, then you must ensure that:
+
+- The encoder output has been tiled to `beam_width` via
+ tfa.seq2seq.tile_batch
+  (NOT `tf.tile`).
+- The `batch_size` argument passed to the `get_initial_state` method of
+ this wrapper is equal to `true_batch_size * beam_width`.
+- The initial state created with `get_initial_state` above contains a
+ `cell_state` value containing properly tiled final state from the
+ encoder.
+
+#### An example:
+
+
+
+```
+tiled_encoder_outputs = tfa.seq2seq.tile_batch(
+ encoder_outputs, multiplier=beam_width)
+tiled_encoder_final_state = tfa.seq2seq.tile_batch(
+ encoder_final_state, multiplier=beam_width)
+tiled_sequence_length = tfa.seq2seq.tile_batch(
+ sequence_length, multiplier=beam_width)
+attention_mechanism = MyFavoriteAttentionMechanism(
+ num_units=attention_depth,
+ memory=tiled_inputs,
+ memory_sequence_length=tiled_sequence_length)
+attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
+decoder_initial_state = attention_cell.get_initial_state(
+ batch_size=true_batch_size * beam_width, dtype=dtype)
+decoder_initial_state = decoder_initial_state.clone(
+ cell_state=tiled_encoder_final_state)
+```
+
+Meanwhile, with `AttentionWrapper`, coverage penalty is suggested to use
+when computing scores (https://arxiv.org/pdf/1609.08144.pdf). It encourages
+the decoding to cover all inputs.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ cell,
+ beam_width,
+ embedding_fn=None,
+ output_layer=None,
+ length_penalty_weight=0.0,
+ coverage_penalty_weight=0.0,
+ reorder_tensor_arrays=True,
+ **kwargs
+)
+```
+
+Initialize the BeamSearchDecoder.
+
+
+#### Args:
+
+
+* `cell`: An `RNNCell` instance.
+* `beam_width`: Python integer, the number of beams.
+* `embedding_fn`: A callable that takes a vector tensor of `ids`
+ (argmax ids).
+* `output_layer`: (Optional) An instance of `tf.keras.layers.Layer`,
+ i.e., `tf.keras.layers.Dense`. Optional layer to apply to the RNN
+ output prior to storing the result or sampling.
+* `length_penalty_weight`: Float weight to penalize length. Disabled with
+ 0.0.
+* `coverage_penalty_weight`: Float weight to penalize the coverage of
+ source sentence. Disabled with 0.0.
+* `reorder_tensor_arrays`: If `True`, `TensorArray`s' elements within the
+ cell state will be reordered according to the beam search path. If
+ the `TensorArray` can be reordered, the stacked form will be
+ returned. Otherwise, the `TensorArray` will be returned as is. Set
+ this flag to `False` if the cell state contains `TensorArray`s that
+ are not amenable to reordering.
+* `**kwargs`: Dict, other keyword arguments for initialization.
+
+
+#### Raises:
+
+
+* `TypeError`: if `cell` is not an instance of `RNNCell`,
+ or `output_layer` is not an instance of `tf.keras.layers.Layer`.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+batch_size
+
+
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_dtype
+
+A (possibly nested tuple of...) dtype[s].
+
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+tracks_own_finished
+
+The BeamSearchDecoder shuffles its beams and their finished state.
+
+For this reason, it conflicts with the `dynamic_decode` function's
+tracking of finished states. Setting this property to true avoids
+early stopping of decoding due to mismanagement of the finished state
+in `dynamic_decode`.
+
+#### Returns:
+
+`True`.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+  layers do), its default value will be set to the mask generated
+  for `inputs` by the previous layer (if `input` did come from
+  a layer that generated a corresponding mask, i.e. if it came from
+  a Keras layer with masking support).
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An output shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+finalize
+
+View source
+
+``` python
+finalize(
+ outputs,
+ final_state,
+ sequence_lengths
+)
+```
+
+Finalize and return the predicted_ids.
+
+
+#### Args:
+
+
+* `outputs`: An instance of BeamSearchDecoderOutput.
+* `final_state`: An instance of BeamSearchDecoderState. Passed through to
+ the output.
+* `sequence_lengths`: An `int64` tensor shaped
+ `[batch_size, beam_width]`. The sequence lengths determined for
+ each beam during decode. **NOTE** These are ignored; the updated
+ sequence lengths are stored in `final_state.lengths`.
+
+
+#### Returns:
+
+
+* `outputs`: An instance of `FinalBeamSearchDecoderOutput` where the
+ predicted_ids are the result of calling _gather_tree.
+* `final_state`: The same input instance of `BeamSearchDecoderState`.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initialize
+
+View source
+
+``` python
+initialize(
+ embedding,
+ start_tokens,
+ end_token,
+ initial_state
+)
+```
+
+Initialize the decoder.
+
+
+#### Args:
+
+
+* `embedding`: A tensor from the embedding layer output, which is the
+ `params` argument for `embedding_lookup`.
+* `start_tokens`: `int32` vector shaped `[batch_size]`, the start tokens.
+* `end_token`: `int32` scalar, the token that marks end of decoding.
+* `initial_state`: A (possibly nested tuple of...) tensors and
+TensorArrays.
+
+#### Returns:
+
+`(finished, start_inputs, initial_state)`.
+
+
+#### Raises:
+
+
+* `ValueError`: If `start_tokens` is not a vector or `end_token` is not a
+ scalar.
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+step
+
+View source
+
+``` python
+step(
+ time,
+ inputs,
+ state,
+ training=None,
+ name=None
+)
+```
+
+Perform a decoding step.
+
+
+#### Args:
+
+
+* `time`: scalar `int32` tensor.
+* `inputs`: A (structure of) input tensors.
+* `state`: A (structure of) state tensors and TensorArrays.
+* `training`: Python boolean. Indicates whether the layer should
+ behave in training mode or in inference mode. Only relevant
+ when `dropout` or `recurrent_dropout` is used.
+* `name`: Name scope for any created operations.
+
+
+#### Returns:
+
+`(outputs, next_state, next_inputs, finished)`.
+
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderOutput.md b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderOutput.md
new file mode 100644
index 0000000000..630aea5ee5
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderOutput.md
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BeamSearchDecoderOutput
+
+
+
+
+
+
+
+
+## Class `BeamSearchDecoderOutput`
+
+BeamSearchDecoderOutput(scores, predicted_ids, parent_ids)
+
+
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.BeamSearchDecoderOutput`
+
+
+
+
+__new__
+
+``` python
+__new__(
+ _cls,
+ scores,
+ predicted_ids,
+ parent_ids
+)
+```
+
+Create new instance of BeamSearchDecoderOutput(scores, predicted_ids, parent_ids)
+
+
+
+
+## Properties
+
+scores
+
+
+
+
+predicted_ids
+
+
+
+
+parent_ids
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderState.md b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderState.md
new file mode 100644
index 0000000000..1e9fab3e2d
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/BeamSearchDecoderState.md
@@ -0,0 +1,87 @@
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.BeamSearchDecoderState
+
+
+
+
+
+
+
+
+## Class `BeamSearchDecoderState`
+
+BeamSearchDecoderState(cell_state, log_probs, finished, lengths, accumulated_attention_probs)
+
+
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.BeamSearchDecoderState`
+
+
+
+
+__new__
+
+``` python
+__new__(
+ _cls,
+ cell_state,
+ log_probs,
+ finished,
+ lengths,
+ accumulated_attention_probs
+)
+```
+
+Create new instance of BeamSearchDecoderState(cell_state, log_probs, finished, lengths, accumulated_attention_probs)
+
+
+
+
+## Properties
+
+cell_state
+
+
+
+
+log_probs
+
+
+
+
+finished
+
+
+
+
+lengths
+
+
+
+
+accumulated_attention_probs
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/CustomSampler.md b/docs/api_docs/python/tfa/seq2seq/CustomSampler.md
new file mode 100644
index 0000000000..0e3ef7b8d3
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/CustomSampler.md
@@ -0,0 +1,167 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.CustomSampler
+
+
+
+
+
+
+
+
+## Class `CustomSampler`
+
+Base abstract class that allows the user to customize sampling.
+
+Inherits From: [`Sampler`](../../tfa/seq2seq/Sampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.CustomSampler`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ initialize_fn,
+ sample_fn,
+ next_inputs_fn,
+ sample_ids_shape=None,
+ sample_ids_dtype=None
+)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `initialize_fn`: callable that returns `(finished, next_inputs)` for
+ the first iteration.
+* `sample_fn`: callable that takes `(time, outputs, state)` and emits
+ tensor `sample_ids`.
+* `next_inputs_fn`: callable that takes
+ `(time, outputs, state, sample_ids)` and emits
+ `(finished, next_inputs, next_state)`.
+* `sample_ids_shape`: Either a list of integers, or a 1-D Tensor of type
+ `int32`, the shape of each value in the `sample_ids` batch.
+ Defaults to a scalar.
+* `sample_ids_dtype`: The dtype of the `sample_ids` tensor. Defaults to
+ int32.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before the
+invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ **kwargs
+)
+```
+
+initialize the sampler with the input tensors.
+
+This method is supposed to be invoked only once before calling the
+other methods of the Sampler.
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors, it could be a nested tuple or
+ a single tensor.
+* `**kwargs`: Other kwargs for initialization. It could contain tensors
+  like a mask for inputs, or non-tensor parameters.
+
+
+#### Returns:
+
+`(initial_finished, initial_inputs)`.
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/Decoder.md b/docs/api_docs/python/tfa/seq2seq/Decoder.md
new file mode 100644
index 0000000000..5b693aaedc
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/Decoder.md
@@ -0,0 +1,175 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.Decoder
+
+
+
+
+
+
+
+
+## Class `Decoder`
+
+An RNN Decoder abstract interface object.
+
+
+
+**Aliases**: `tfa.seq2seq.decoder.Decoder`
+
+
+
+Concepts used by this interface:
+- `inputs`: (structure of) tensors and TensorArrays that is passed as input
+ to the RNNCell composing the decoder, at each time step.
+- `state`: (structure of) tensors and TensorArrays that is passed to the
+ RNNCell instance as the state.
+- `finished`: boolean tensor telling whether each sequence in the batch is
+ finished.
+- `training`: boolean whether it should behave in training mode or in
+ inference mode.
+- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at
+ each time step.
+
+## Properties
+
+batch_size
+
+The batch size of input values.
+
+
+output_dtype
+
+A (possibly nested tuple of...) dtype[s].
+
+
+output_size
+
+A (possibly nested tuple of...) integer[s] or `TensorShape`
+object[s].
+
+tracks_own_finished
+
+Describes whether the Decoder keeps track of finished states.
+
+Most decoders will emit a true/false `finished` value independently
+at each time step. In this case, the `dynamic_decode` function keeps
+track of which batch entries are already finished, and performs a
+logical OR to insert new batches to the finished set.
+
+Some decoders, however, shuffle batches / beams between time steps and
+`dynamic_decode` will mix up the finished state across these entries
+because it does not track the reshuffle across time steps. In this
+case, it is up to the decoder to declare that it will keep track of its
+own finished state by setting this property to `True`.
+
+#### Returns:
+
+Python bool.
+
+
+
+
+## Methods
+
+finalize
+
+View source
+
+``` python
+finalize(
+ outputs,
+ final_state,
+ sequence_lengths
+)
+```
+
+
+
+
+initialize
+
+View source
+
+``` python
+initialize(name=None)
+```
+
+Called before any decoding iterations.
+
+This method must compute initial input values and initial state.
+
+#### Args:
+
+
+* `name`: Name scope for any created operations.
+
+
+#### Returns:
+
+`(finished, initial_inputs, initial_state)`: initial values of
+'finished' flags, inputs and state.
+
+
+step
+
+View source
+
+``` python
+step(
+ time,
+ inputs,
+ state,
+ training=None,
+ name=None
+)
+```
+
+Called per step of decoding (but only once for dynamic decoding).
+
+
+#### Args:
+
+
+* `time`: Scalar `int32` tensor. Current step number.
+* `inputs`: RNNCell input (possibly nested tuple of) tensor[s] for this
+ time step.
+* `state`: RNNCell state (possibly nested tuple of) tensor[s] from
+ previous time step.
+* `training`: Python boolean. Indicates whether the layer should behave
+ in training mode or in inference mode. Only relevant
+ when `dropout` or `recurrent_dropout` is used.
+* `name`: Name scope for any created operations.
+
+
+#### Returns:
+
+`(outputs, next_state, next_inputs, finished)`: `outputs` is an
+object containing the decoder output, `next_state` is a (structure
+of) state tensors and TensorArrays, `next_inputs` is the tensor that
+should be used as input for the next step, `finished` is a boolean
+tensor telling whether the sequence is complete, for each sequence in
+the batch.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/FinalBeamSearchDecoderOutput.md b/docs/api_docs/python/tfa/seq2seq/FinalBeamSearchDecoderOutput.md
new file mode 100644
index 0000000000..323e514e12
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/FinalBeamSearchDecoderOutput.md
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+# tfa.seq2seq.FinalBeamSearchDecoderOutput
+
+
+
+
+
+
+
+
+## Class `FinalBeamSearchDecoderOutput`
+
+Final outputs returned by the beam search after all decoding is
+
+
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.FinalBeamSearchDecoderOutput`
+
+
+finished.
+
+#### Args:
+
+
+* `predicted_ids`: The final prediction. A tensor of shape
+ `[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
+ `output_time_major` is True). Beams are ordered from best to worst.
+* `beam_search_decoder_output`: An instance of `BeamSearchDecoderOutput` that
+ describes the state of the beam search.
+
+__new__
+
+``` python
+__new__(
+ _cls,
+ predicted_ids,
+ beam_search_decoder_output
+)
+```
+
+Create new instance of FinalBeamDecoderOutput(predicted_ids, beam_search_decoder_output)
+
+
+
+
+## Properties
+
+predicted_ids
+
+
+
+
+beam_search_decoder_output
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/GreedyEmbeddingSampler.md b/docs/api_docs/python/tfa/seq2seq/GreedyEmbeddingSampler.md
new file mode 100644
index 0000000000..7477724e9d
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/GreedyEmbeddingSampler.md
@@ -0,0 +1,162 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.GreedyEmbeddingSampler
+
+
+
+
+
+
+
+
+## Class `GreedyEmbeddingSampler`
+
+A sampler for use during inference.
+
+Inherits From: [`Sampler`](../../tfa/seq2seq/Sampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.GreedyEmbeddingSampler`
+
+
+
+Uses the argmax of the output (treated as logits) and passes the
+result through an embedding layer to get the next input.
+
+__init__
+
+View source
+
+``` python
+__init__(embedding_fn=None)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `embedding_fn`: An optional callable that takes a vector tensor of `ids`
+ (argmax ids), or the `params` argument for `embedding_lookup`. The
+ returned tensor will be passed to the decoder input. Default to use
+ `tf.nn.embedding_lookup`.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before the
+invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ embedding,
+ start_tokens=None,
+ end_token=None
+)
+```
+
+Initialize the GreedyEmbeddingSampler.
+
+
+#### Args:
+
+
+* `embedding`: tensor that contains the embedding states matrix. It will
+  be used to generate outputs with start_tokens and end_token.
+ The embedding will be ignored if the embedding_fn has been provided
+ at __init__().
+* `start_tokens`: `int32` vector shaped `[batch_size]`, the start tokens.
+* `end_token`: `int32` scalar, the token that marks end of decoding.
+
+
+#### Returns:
+
+Tuple of two items: `(finished, self.start_inputs)`.
+
+
+#### Raises:
+
+
+* `ValueError`: if `start_tokens` is not a 1D tensor or `end_token` is
+ not a scalar.
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+next_inputs_fn for GreedyEmbeddingHelper.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+sample for GreedyEmbeddingHelper.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/InferenceSampler.md b/docs/api_docs/python/tfa/seq2seq/InferenceSampler.md
new file mode 100644
index 0000000000..99630b9e36
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/InferenceSampler.md
@@ -0,0 +1,164 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.InferenceSampler
+
+
+
+
+
+
+
+
+## Class `InferenceSampler`
+
+A helper to use during inference with a custom sampling function.
+
+Inherits From: [`Sampler`](../../tfa/seq2seq/Sampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.InferenceSampler`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ sample_fn,
+ sample_shape,
+ sample_dtype,
+ end_fn,
+ next_inputs_fn=None
+)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `sample_fn`: A callable that takes `outputs` and emits tensor
+ `sample_ids`.
+* `sample_shape`: Either a list of integers, or a 1-D Tensor of type
+  `int32`, the shape of each sample in the batch returned by
+ `sample_fn`.
+* `sample_dtype`: the dtype of the sample returned by `sample_fn`.
+* `end_fn`: A callable that takes `sample_ids` and emits a `bool` vector
+ shaped `[batch_size]` indicating whether each sample is an end
+ token.
+* `next_inputs_fn`: (Optional) A callable that takes `sample_ids` and
+ returns the next batch of inputs. If not provided, `sample_ids` is
+ used as the next batch of inputs.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before the
+invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(start_inputs)
+```
+
+initialize the sampler with the input tensors.
+
+This method is supposed to be invoked only once before calling the
+other methods of the Sampler.
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors, it could be a nested tuple or
+ a single tensor.
+* `**kwargs`: Other kwargs for initialization. It could contain tensors
+  like a mask for inputs, or non-tensor parameters.
+
+
+#### Returns:
+
+`(initial_finished, initial_inputs)`.
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/LuongAttention.md b/docs/api_docs/python/tfa/seq2seq/LuongAttention.md
new file mode 100644
index 0000000000..ed4e1cb9d6
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/LuongAttention.md
@@ -0,0 +1,1012 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.LuongAttention
+
+
+
+
+
+
+
+
+## Class `LuongAttention`
+
+Implements Luong-style (multiplicative) attention scoring.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.LuongAttention`
+
+
+
+This attention has two forms. The first is standard Luong attention,
+as described in:
+
+Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
+[Effective Approaches to Attention-based Neural Machine Translation.
+EMNLP 2015.](https://arxiv.org/abs/1508.04025)
+
+The second is the scaled form inspired partly by the normalized form of
+Bahdanau attention.
+
+To enable the second form, construct the object with parameter
+`scale=True`.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ units,
+ memory=None,
+ memory_sequence_length=None,
+ scale=False,
+ probability_fn='softmax',
+ dtype=None,
+ name='LuongAttention',
+ **kwargs
+)
+```
+
+Construct the AttentionMechanism mechanism.
+
+
+#### Args:
+
+
+* `units`: The depth of the attention mechanism.
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `scale`: Python boolean. Whether to scale the energy term.
+* `probability_fn`: (optional) string, the name of the function used to
+  convert the attention score to probabilities. The default is `softmax`,
+  which is `tf.nn.softmax`. Another option is `hardmax`, which is
+  hardmax() within this module. Any other value will result in a
+  validation error. Defaults to `softmax`.
+* `dtype`: The data type for the memory layer of the attention mechanism.
+* `name`: Name to use when creating ops.
+* `**kwargs`: Dictionary that contains other common arguments for layer
+ creation.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+alignments_size
+
+
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+memory_initialized
+
+Returns `True` if this attention mechanism has been initialized with
+a memory.
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+state_size
+
+
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(
+ inputs,
+ **kwargs
+)
+```
+
+Preprocess the inputs before calling `base_layer.__call__()`.
+
+Note that there are two situations here, one for setting up the memory,
+and one with the actual query and state.
+1. When the memory has not been configured, we just pass all the params
+ to base_layer.__call__(), which will then invoke self.call() with
+ proper inputs, which allows this class to setup memory.
+2. When the memory has already been setup, the input should contain
+ query and state, and optionally processed memory. If the processed
+ memory is not included in the input, we will have to append it to
+ the inputs and give it to the base_layer.__call__(). The processed
+ memory is the output of first invocation of self.__call__(). If we
+ don't add it here, then from keras perspective, the graph is
+ disconnected since the output from previous call is never used.
+
+#### Args:
+
+
+* `inputs`: the inputs tensors.
+* `**kwargs`: dict, other keyword arguments for the `__call__()`
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+View source
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+deserialize_inner_layer_from_config
+
+View source
+
+``` python
+deserialize_inner_layer_from_config(
+ cls,
+ config,
+ custom_objects
+)
+```
+
+Helper method that reconstructs the query and memory from the config.
+
+In the get_config() method, the query and memory layer configs are
+serialized into dicts for persistence; this method performs the reverse
+action to reconstruct the layer from the config.
+
+#### Args:
+
+
+* `config`: dict, the configs that will be used to reconstruct the
+ object.
+* `custom_objects`: dict mapping class names (or function names) of
+ custom (non-Keras) objects to class/functions.
+
+#### Returns:
+
+
+* `config`: dict, the config with layer instance created, which is ready
+ to be used as init parameters.
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initial_alignments
+
+View source
+
+``` python
+initial_alignments(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial alignment values for the `AttentionWrapper`
+class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return a tensor of all zeros.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A `dtype` tensor shaped `[batch_size, alignments_size]`
+(`alignments_size` is the values' `max_time`).
+
+
+initial_state
+
+View source
+
+``` python
+initial_state(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial state values for the `AttentionWrapper` class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return the same output as
+initial_alignments.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A structure of all-zero tensors with shapes as described by
+`state_size`.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+setup_memory
+
+View source
+
+``` python
+setup_memory(
+ memory,
+ memory_sequence_length=None,
+ memory_mask=None
+)
+```
+
+Pre-process the memory before actually querying the memory.
+
+This should only be called once at the first invocation of call().
+
+#### Args:
+
+
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional) Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `memory_mask`: (Optional) The boolean tensor with shape `[batch_size,
+ max_time]`. For any value equal to False, the corresponding value
+ in memory should be ignored.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/LuongMonotonicAttention.md b/docs/api_docs/python/tfa/seq2seq/LuongMonotonicAttention.md
new file mode 100644
index 0000000000..dae455fa1d
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/LuongMonotonicAttention.md
@@ -0,0 +1,1014 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.LuongMonotonicAttention
+
+
+
+
+
+
+
+
+## Class `LuongMonotonicAttention`
+
+Monotonic attention mechanism with Luong-style energy function.
+
+
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.LuongMonotonicAttention`
+
+
+
+This type of attention enforces a monotonic constraint on the attention
+distributions; that is, once the model attends to a given point in the
+memory it can't attend to any prior points at subsequent output timesteps.
+It achieves this by using the _monotonic_probability_fn instead of softmax
+to construct its attention distributions. Otherwise, it is equivalent to
+LuongAttention. This approach is proposed in
+
+[Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
+"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
+ICML 2017.](https://arxiv.org/abs/1704.00784)
+
+__init__
+
+View source
+
+``` python
+__init__(
+ units,
+ memory=None,
+ memory_sequence_length=None,
+ scale=False,
+ sigmoid_noise=0.0,
+ sigmoid_noise_seed=None,
+ score_bias_init=0.0,
+ mode='parallel',
+ dtype=None,
+ name='LuongMonotonicAttention',
+ **kwargs
+)
+```
+
+Construct the Attention mechanism.
+
+
+#### Args:
+
+
+* `units`: The depth of the query mechanism.
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional): Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `scale`: Python boolean. Whether to scale the energy term.
+* `sigmoid_noise`: Standard deviation of pre-sigmoid noise. See the
+ docstring for `_monotonic_probability_fn` for more information.
+* `sigmoid_noise_seed`: (optional) Random seed for pre-sigmoid noise.
+* `score_bias_init`: Initial value for score bias scalar. It's
+ recommended to initialize this to a negative value when the length
+ of the memory is large.
+* `mode`: How to compute the attention distribution. Must be one of
+ 'recursive', 'parallel', or 'hard'. See the docstring for
+  `tfa.seq2seq.monotonic_attention` for more information.
+* `dtype`: The data type for the query and memory layers of the attention
+ mechanism.
+* `name`: Name to use when creating ops.
+* `**kwargs`: Dictionary that contains other common arguments for layer
+ creation.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+alignments_size
+
+
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+memory_initialized
+
+Returns `True` if this attention mechanism has been initialized with
+a memory.
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+state_size
+
+
+
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(
+ inputs,
+ **kwargs
+)
+```
+
+Preprocess the inputs before calling `base_layer.__call__()`.
+
+Note that there are two situations here, one for setting up the memory,
+and one with the actual query and state.
+1. When the memory has not been configured, we just pass all the params
+ to base_layer.__call__(), which will then invoke self.call() with
+ proper inputs, which allows this class to setup memory.
+2. When the memory has already been setup, the input should contain
+ query and state, and optionally processed memory. If the processed
+ memory is not included in the input, we will have to append it to
+ the inputs and give it to the base_layer.__call__(). The processed
+ memory is the output of first invocation of self.__call__(). If we
+ don't add it here, then from keras perspective, the graph is
+ disconnected since the output from previous call is never used.
+
+#### Args:
+
+
+* `inputs`: the inputs tensors.
+* `**kwargs`: dict, other keyword arguments for the `__call__()`
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+View source
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+deserialize_inner_layer_from_config
+
+View source
+
+``` python
+deserialize_inner_layer_from_config(
+ cls,
+ config,
+ custom_objects
+)
+```
+
+Helper method that reconstructs the query and memory from the config.
+
+In the get_config() method, the query and memory layer configs are
+serialized into dicts for persistence; this method performs the reverse
+action to reconstruct the layer from the config.
+
+#### Args:
+
+
+* `config`: dict, the configs that will be used to reconstruct the
+ object.
+* `custom_objects`: dict mapping class names (or function names) of
+ custom (non-Keras) objects to class/functions.
+
+#### Returns:
+
+
+* `config`: dict, the config with layer instance created, which is ready
+ to be used as init parameters.
+
+from_config
+
+View source
+
+``` python
+@classmethod
+from_config(
+ cls,
+ config,
+ custom_objects=None
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+View source
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+initial_alignments
+
+View source
+
+``` python
+initial_alignments(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial alignment values for the monotonic attentions.
+
+Initializes to dirac distributions, i.e.
+[1, 0, 0, ...memory length..., 0] for all entries in the batch.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A `dtype` tensor shaped `[batch_size, alignments_size]`
+(`alignments_size` is the values' `max_time`).
+
+
+initial_state
+
+View source
+
+``` python
+initial_state(
+ batch_size,
+ dtype
+)
+```
+
+Creates the initial state values for the `AttentionWrapper` class.
+
+This is important for AttentionMechanisms that use the previous
+alignment to calculate the alignment at the next time step
+(e.g. monotonic attention).
+
+The default behavior is to return the same output as
+initial_alignments.
+
+#### Args:
+
+
+* `batch_size`: `int32` scalar, the batch_size.
+* `dtype`: The `dtype`.
+
+
+#### Returns:
+
+A structure of all-zero tensors with shapes as described by
+`state_size`.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+setup_memory
+
+View source
+
+``` python
+setup_memory(
+ memory,
+ memory_sequence_length=None,
+ memory_mask=None
+)
+```
+
+Pre-process the memory before actually querying the memory.
+
+This should only be called once at the first invocation of call().
+
+#### Args:
+
+
+* `memory`: The memory to query; usually the output of an RNN encoder.
+ This tensor should be shaped `[batch_size, max_time, ...]`.
+* `memory_sequence_length`: (optional) Sequence lengths for the batch
+ entries in memory. If provided, the memory tensor rows are masked
+ with zeros for values past the respective sequence lengths.
+* `memory_mask`: (Optional) The boolean tensor with shape `[batch_size,
+ max_time]`. For any value equal to False, the corresponding value
+ in memory should be ignored.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/SampleEmbeddingSampler.md b/docs/api_docs/python/tfa/seq2seq/SampleEmbeddingSampler.md
new file mode 100644
index 0000000000..9cf4e2e08a
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/SampleEmbeddingSampler.md
@@ -0,0 +1,179 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.SampleEmbeddingSampler
+
+
+
+
+
+
+
+
+## Class `SampleEmbeddingSampler`
+
+A sampler for use during inference.
+
+Inherits From: [`GreedyEmbeddingSampler`](../../tfa/seq2seq/GreedyEmbeddingSampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.SampleEmbeddingSampler`
+
+
+
+Uses sampling (from a distribution) instead of argmax and passes the
+result through an embedding layer to get the next input.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ embedding_fn=None,
+ softmax_temperature=None,
+ seed=None
+)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `embedding_fn`: (Optional) A callable that takes a vector tensor of
+ `ids` (argmax ids), or the `params` argument for
+ `embedding_lookup`. The returned tensor will be passed to the
+ decoder input.
+* `softmax_temperature`: (Optional) `float32` scalar, value to divide the
+ logits by before computing the softmax. Larger values (above 1.0)
+ result in more random samples, while smaller values push the
+ sampling distribution towards the argmax. Must be strictly greater
+ than 0. Defaults to 1.0.
+* `seed`: (Optional) The sampling seed.
+
+
+#### Raises:
+
+
+* `ValueError`: if `start_tokens` is not a 1D tensor or `end_token` is
+ not a scalar.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before
+the invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ embedding,
+ start_tokens=None,
+ end_token=None
+)
+```
+
+Initialize the GreedyEmbeddingSampler.
+
+
+#### Args:
+
+
+* `embedding`: tensor that contains embedding states matrix. It will be
+  used to generate outputs with start_tokens and end_tokens.
+ The embedding will be ignored if the embedding_fn has been provided
+ at __init__().
+* `start_tokens`: `int32` vector shaped `[batch_size]`, the start tokens.
+* `end_token`: `int32` scalar, the token that marks end of decoding.
+
+
+#### Returns:
+
+Tuple of two items: `(finished, self.start_inputs)`.
+
+
+#### Raises:
+
+
+* `ValueError`: if `start_tokens` is not a 1D tensor or `end_token` is
+ not a scalar.
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+next_inputs_fn for GreedyEmbeddingHelper.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+sample for SampleEmbeddingHelper.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/Sampler.md b/docs/api_docs/python/tfa/seq2seq/Sampler.md
new file mode 100644
index 0000000000..773c669364
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/Sampler.md
@@ -0,0 +1,143 @@
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.Sampler
+
+
+
+
+
+
+
+
+## Class `Sampler`
+
+Interface for implementing sampling in seq2seq decoders.
+
+
+
+**Aliases**: `tfa.seq2seq.sampler.Sampler`
+
+
+
+Sampler instances are used by `BasicDecoder`. The normal usage of a sampler
+is like below:
+sampler = Sampler(init_args)
+(initial_finished, initial_inputs) = sampler.initialize(input_tensors)
+for time_step in range(time):
+ cell_output, cell_state = cell.call(cell_input, previous_state)
+ sample_ids = sampler.sample(time_step, cell_output, cell_state)
+ (finished, next_inputs, next_state) = sampler.next_inputs(
+ time_step, cell_output, cell_state)
+
+Note that none of the tensor inputs should be fed to the Sampler as __init__()
+parameters; instead, they should be fed by decoders via initialize().
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before
+the invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ **kwargs
+)
+```
+
+initialize the sampler with the input tensors.
+
+This method is supposed to be invoked only once before calling other
+methods of the Sampler.
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors, it could be a nested tuple or
+ a single tensor.
+* `**kwargs`: Other kwargs for initialization. It could contain tensors
+ like mask for inputs, or non tensor parameter.
+
+
+#### Returns:
+
+`(initial_finished, initial_inputs)`.
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/ScheduledEmbeddingTrainingSampler.md b/docs/api_docs/python/tfa/seq2seq/ScheduledEmbeddingTrainingSampler.md
new file mode 100644
index 0000000000..006a57ddde
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/ScheduledEmbeddingTrainingSampler.md
@@ -0,0 +1,174 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.ScheduledEmbeddingTrainingSampler
+
+
+
+
+
+
+
+
+## Class `ScheduledEmbeddingTrainingSampler`
+
+A training sampler that adds scheduled sampling.
+
+Inherits From: [`TrainingSampler`](../../tfa/seq2seq/TrainingSampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.ScheduledEmbeddingTrainingSampler`
+
+
+
+Returns -1s for sample_ids where no sampling took place; valid
+sample id values elsewhere.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ sampling_probability,
+ embedding_fn=None,
+ time_major=False,
+ seed=None,
+ scheduling_seed=None
+)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `sampling_probability`: A `float32` 0-D or 1-D tensor: the probability
+ of sampling categorically from the output ids instead of reading
+ directly from the inputs.
+* `embedding_fn`: A callable that takes a vector tensor of `ids`
+ (argmax ids), or the `params` argument for `embedding_lookup`.
+* `time_major`: Python bool. Whether the tensors in `inputs` are time
+ major. If `False` (default), they are assumed to be batch major.
+* `seed`: The sampling seed.
+* `scheduling_seed`: The schedule decision rule sampling seed.
+
+
+#### Raises:
+
+
+* `ValueError`: if `sampling_probability` is not a scalar or vector.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before
+the invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ sequence_length=None,
+ mask=None,
+ embedding=None
+)
+```
+
+Initialize the TrainSampler.
+
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors.
+* `sequence_length`: An int32 vector tensor.
+* `mask`: A boolean 2D tensor.
+
+
+#### Returns:
+
+(finished, next_inputs), a tuple of two items. The first item is a
+ boolean vector to indicate whether the item in the batch has
+  finished. The second item is the first slice of input data based on
+ the timestep dimension (usually the second dim of the input).
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/ScheduledOutputTrainingSampler.md b/docs/api_docs/python/tfa/seq2seq/ScheduledOutputTrainingSampler.md
new file mode 100644
index 0000000000..692951ca87
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/ScheduledOutputTrainingSampler.md
@@ -0,0 +1,173 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.ScheduledOutputTrainingSampler
+
+
+
+
+
+
+
+
+## Class `ScheduledOutputTrainingSampler`
+
+A training sampler that adds scheduled sampling directly to outputs.
+
+Inherits From: [`TrainingSampler`](../../tfa/seq2seq/TrainingSampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.ScheduledOutputTrainingSampler`
+
+
+
+Returns False for sample_ids where no sampling took place; True
+elsewhere.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ sampling_probability,
+ time_major=False,
+ seed=None,
+ next_inputs_fn=None
+)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `sampling_probability`: A `float32` scalar tensor: the probability of
+ sampling from the outputs instead of reading directly from the
+ inputs.
+* `time_major`: Python bool. Whether the tensors in `inputs` are time
+ major. If `False` (default), they are assumed to be batch major.
+* `seed`: The sampling seed.
+* `next_inputs_fn`: (Optional) callable to apply to the RNN outputs to
+ create the next input when sampling. If `None` (default), the RNN
+ outputs will be used as the next inputs.
+
+
+#### Raises:
+
+
+* `ValueError`: if `sampling_probability` is not a scalar or vector.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before
+the invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ sequence_length=None,
+ mask=None,
+ auxiliary_inputs=None
+)
+```
+
+Initialize the TrainSampler.
+
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors.
+* `sequence_length`: An int32 vector tensor.
+* `mask`: A boolean 2D tensor.
+
+
+#### Returns:
+
+(finished, next_inputs), a tuple of two items. The first item is a
+ boolean vector to indicate whether the item in the batch has
+  finished. The second item is the first slice of input data based on
+ the timestep dimension (usually the second dim of the input).
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/SequenceLoss.md b/docs/api_docs/python/tfa/seq2seq/SequenceLoss.md
new file mode 100644
index 0000000000..0e52c84b15
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/SequenceLoss.md
@@ -0,0 +1,109 @@
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.SequenceLoss
+
+
+
+
+
+
+
+
+## Class `SequenceLoss`
+
+Weighted cross-entropy loss for a sequence of logits.
+
+
+
+**Aliases**: `tfa.seq2seq.loss.SequenceLoss`
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ average_across_timesteps=False,
+ average_across_batch=False,
+ sum_over_timesteps=True,
+ sum_over_batch=True,
+ softmax_loss_function=None,
+ name=None
+)
+```
+
+Initialize self. See help(type(self)) for accurate signature.
+
+
+
+
+## Methods
+
+__call__
+
+View source
+
+``` python
+__call__(
+ y_true,
+ y_pred,
+ sample_weight=None
+)
+```
+
+Override the parent __call__ to have a customized reduce
+behavior.
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+#### Args:
+
+
+* `config`: Output of `get_config()`.
+
+
+#### Returns:
+
+A `Loss` instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/TrainingSampler.md b/docs/api_docs/python/tfa/seq2seq/TrainingSampler.md
new file mode 100644
index 0000000000..36e064c32f
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/TrainingSampler.md
@@ -0,0 +1,162 @@
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.TrainingSampler
+
+
+
+
+
+
+
+
+## Class `TrainingSampler`
+
+A Sampler for use during training.
+
+Inherits From: [`Sampler`](../../tfa/seq2seq/Sampler.md)
+
+**Aliases**: `tfa.seq2seq.sampler.TrainingSampler`
+
+
+
+Only reads inputs.
+
+Returned sample_ids are the argmax of the RNN output logits.
+
+__init__
+
+View source
+
+``` python
+__init__(time_major=False)
+```
+
+Initializer.
+
+
+#### Args:
+
+
+* `time_major`: Python bool. Whether the tensors in `inputs` are time
+ major. If `False` (default), they are assumed to be batch major.
+
+
+#### Raises:
+
+
+* `ValueError`: if `sequence_length` is not a 1D tensor or `mask` is
+ not a 2D boolean tensor.
+
+
+
+## Properties
+
+batch_size
+
+Batch size of tensor returned by `sample`.
+
+Returns a scalar int32 tensor. The return value might not be
+available before the invocation of initialize(); in this case,
+ValueError is raised.
+
+sample_ids_dtype
+
+DType of tensor returned by `sample`.
+
+Returns a DType. The return value might not be available before
+the invocation of initialize().
+
+sample_ids_shape
+
+Shape of tensor returned by `sample`, excluding the batch dimension.
+
+Returns a `TensorShape`. The return value might not be available
+before the invocation of initialize().
+
+
+
+## Methods
+
+initialize
+
+View source
+
+``` python
+initialize(
+ inputs,
+ sequence_length=None,
+ mask=None
+)
+```
+
+Initialize the TrainSampler.
+
+
+#### Args:
+
+
+* `inputs`: A (structure of) input tensors.
+* `sequence_length`: An int32 vector tensor.
+* `mask`: A boolean 2D tensor.
+
+
+#### Returns:
+
+(finished, next_inputs), a tuple of two items. The first item is a
+ boolean vector to indicate whether the item in the batch has
+  finished. The second item is the first slice of input data based on
+ the timestep dimension (usually the second dim of the input).
+
+
+
+
+View source
+
+``` python
+next_inputs(
+ time,
+ outputs,
+ state,
+ sample_ids
+)
+```
+
+Returns `(finished, next_inputs, next_state)`.
+
+
+sample
+
+View source
+
+``` python
+sample(
+ time,
+ outputs,
+ state
+)
+```
+
+Returns `sample_ids`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/attention_wrapper.md b/docs/api_docs/python/tfa/seq2seq/attention_wrapper.md
new file mode 100644
index 0000000000..f1a3242f3c
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/attention_wrapper.md
@@ -0,0 +1,49 @@
+
+
+
+
+
+# Module: tfa.seq2seq.attention_wrapper
+
+
+
+
+
+
+A powerful dynamic attention wrapper object.
+
+
+
+## Classes
+
+[`class AttentionMechanism`](../../tfa/seq2seq/AttentionMechanism.md)
+
+[`class AttentionWrapper`](../../tfa/seq2seq/AttentionWrapper.md): Wraps another `RNNCell` with attention.
+
+[`class AttentionWrapperState`](../../tfa/seq2seq/AttentionWrapperState.md): `namedtuple` storing the state of a `AttentionWrapper`.
+
+[`class BahdanauAttention`](../../tfa/seq2seq/BahdanauAttention.md): Implements Bahdanau-style (additive) attention.
+
+[`class BahdanauMonotonicAttention`](../../tfa/seq2seq/BahdanauMonotonicAttention.md): Monotonic attention mechanism with Bahadanau-style energy function.
+
+[`class LuongAttention`](../../tfa/seq2seq/LuongAttention.md): Implements Luong-style (multiplicative) attention scoring.
+
+[`class LuongMonotonicAttention`](../../tfa/seq2seq/LuongMonotonicAttention.md): Monotonic attention mechanism with Luong-style energy function.
+
+## Functions
+
+[`hardmax(...)`](../../tfa/seq2seq/hardmax.md): Returns batched one-hot vectors.
+
+[`monotonic_attention(...)`](../../tfa/seq2seq/monotonic_attention.md): Compute monotonic attention distribution from choosing probabilities.
+
+[`safe_cumprod(...)`](../../tfa/seq2seq/safe_cumprod.md): Computes cumprod of x in logspace using cumsum to avoid underflow.
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/basic_decoder.md b/docs/api_docs/python/tfa/seq2seq/basic_decoder.md
new file mode 100644
index 0000000000..66c66f5417
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/basic_decoder.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.seq2seq.basic_decoder
+
+
+
+
+
+
+A class of Decoders that may sample to generate the next input.
+
+
+
+## Classes
+
+[`class BasicDecoder`](../../tfa/seq2seq/BasicDecoder.md): Basic sampling decoder.
+
+[`class BasicDecoderOutput`](../../tfa/seq2seq/BasicDecoderOutput.md): BasicDecoderOutput(rnn_output, sample_id)
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/beam_search_decoder.md b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder.md
new file mode 100644
index 0000000000..7a342044cd
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder.md
@@ -0,0 +1,47 @@
+
+
+
+
+
+# Module: tfa.seq2seq.beam_search_decoder
+
+
+
+
+
+
+A decoder that performs beam search.
+
+
+
+## Classes
+
+[`class BeamSearchDecoder`](../../tfa/seq2seq/BeamSearchDecoder.md): BeamSearch sampling decoder.
+
+[`class BeamSearchDecoderMixin`](../../tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin.md): BeamSearchDecoderMixin contains the common methods for
+
+[`class BeamSearchDecoderOutput`](../../tfa/seq2seq/BeamSearchDecoderOutput.md): BeamSearchDecoderOutput(scores, predicted_ids, parent_ids)
+
+[`class BeamSearchDecoderState`](../../tfa/seq2seq/BeamSearchDecoderState.md): BeamSearchDecoderState(cell_state, log_probs, finished, lengths, accumulated_attention_probs)
+
+[`class FinalBeamSearchDecoderOutput`](../../tfa/seq2seq/FinalBeamSearchDecoderOutput.md): Final outputs returned by the beam search after all decoding is
+
+## Functions
+
+[`attention_probs_from_attn_state(...)`](../../tfa/seq2seq/beam_search_decoder/attention_probs_from_attn_state.md): Calculates the average attention probabilities.
+
+[`gather_tree_from_array(...)`](../../tfa/seq2seq/gather_tree_from_array.md): Calculates the full beams for `TensorArray`s.
+
+[`get_attention_probs(...)`](../../tfa/seq2seq/beam_search_decoder/get_attention_probs.md): Get attention probabilities from the cell state.
+
+[`tile_batch(...)`](../../tfa/seq2seq/tile_batch.md): Tile the batch dimension of a (possibly nested structure of) tensor(s)
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin.md b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin.md
new file mode 100644
index 0000000000..1919eb832c
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/BeamSearchDecoderMixin.md
@@ -0,0 +1,190 @@
+
+
+
+
+
+
+
+
+
+
+
+# tfa.seq2seq.beam_search_decoder.BeamSearchDecoderMixin
+
+
+
+
+
+
+
+
+## Class `BeamSearchDecoderMixin`
+
+BeamSearchDecoderMixin contains the common methods for
+
+
+
+
+BeamSearchDecoder.
+
+It is expected to be used as a base class for concrete
+BeamSearchDecoder. Since this is a mixin class, it is expected to be
+used together with another class as a base.
+
+__init__
+
+View source
+
+``` python
+__init__(
+ cell,
+ beam_width,
+ output_layer=None,
+ length_penalty_weight=0.0,
+ coverage_penalty_weight=0.0,
+ reorder_tensor_arrays=True,
+ **kwargs
+)
+```
+
+Initialize the BeamSearchDecoderMixin.
+
+
+#### Args:
+
+
+* `cell`: An `RNNCell` instance.
+* `beam_width`: Python integer, the number of beams.
+* `output_layer`: (Optional) An instance of `tf.keras.layers.Layer`,
+ i.e., `tf.keras.layers.Dense`. Optional layer to apply to the RNN
+ output prior to storing the result or sampling.
+* `length_penalty_weight`: Float weight to penalize length. Disabled with
+ 0.0.
+* `coverage_penalty_weight`: Float weight to penalize the coverage of
+ source sentence. Disabled with 0.0.
+* `reorder_tensor_arrays`: If `True`, `TensorArray`s' elements within the
+ cell state will be reordered according to the beam search path. If
+ the `TensorArray` can be reordered, the stacked form will be
+ returned. Otherwise, the `TensorArray` will be returned as is. Set
+ this flag to `False` if the cell state contains `TensorArray`s that
+ are not amenable to reordering.
+* `**kwargs`: Dict, other keyword arguments for parent class.
+
+
+#### Raises:
+
+
+* `TypeError`: if `cell` is not an instance of `RNNCell`,
+ or `output_layer` is not an instance of `tf.keras.layers.Layer`.
+
+
+
+## Properties
+
+batch_size
+
+
+
+
+output_size
+
+
+
+
+tracks_own_finished
+
+The BeamSearchDecoder shuffles its beams and their finished state.
+
+For this reason, it conflicts with the `dynamic_decode` function's
+tracking of finished states. Setting this property to true avoids
+early stopping of decoding due to mismanagement of the finished state
+in `dynamic_decode`.
+
+#### Returns:
+
+`True`.
+
+
+
+
+## Methods
+
+finalize
+
+View source
+
+``` python
+finalize(
+ outputs,
+ final_state,
+ sequence_lengths
+)
+```
+
+Finalize and return the predicted_ids.
+
+
+#### Args:
+
+
+* `outputs`: An instance of BeamSearchDecoderOutput.
+* `final_state`: An instance of BeamSearchDecoderState. Passed through to
+ the output.
+* `sequence_lengths`: An `int64` tensor shaped
+ `[batch_size, beam_width]`. The sequence lengths determined for
+ each beam during decode. **NOTE** These are ignored; the updated
+ sequence lengths are stored in `final_state.lengths`.
+
+
+#### Returns:
+
+
+* `outputs`: An instance of `FinalBeamSearchDecoderOutput` where the
+ predicted_ids are the result of calling _gather_tree.
+* `final_state`: The same input instance of `BeamSearchDecoderState`.
+
+step
+
+View source
+
+``` python
+step(
+ time,
+ inputs,
+ state,
+ training=None,
+ name=None
+)
+```
+
+Perform a decoding step.
+
+
+#### Args:
+
+
+* `time`: scalar `int32` tensor.
+* `inputs`: A (structure of) input tensors.
+* `state`: A (structure of) state tensors and TensorArrays.
+* `training`: Python boolean. Indicates whether the layer should
+ behave in training mode or in inference mode. Only relevant
+ when `dropout` or `recurrent_dropout` is used.
+* `name`: Name scope for any created operations.
+
+
+#### Returns:
+
+`(outputs, next_state, next_inputs, finished)`.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/attention_probs_from_attn_state.md b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/attention_probs_from_attn_state.md
new file mode 100644
index 0000000000..40abf87452
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/attention_probs_from_attn_state.md
@@ -0,0 +1,45 @@
+
+
+
+
+
+# tfa.seq2seq.beam_search_decoder.attention_probs_from_attn_state
+
+
+
+
+
+
+
+
+Calculates the average attention probabilities.
+
+``` python
+tfa.seq2seq.beam_search_decoder.attention_probs_from_attn_state(attention_state)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `attention_state`: An instance of `AttentionWrapperState`.
+
+
+#### Returns:
+
+The attention probabilities in the given AttentionWrapperState.
+If there are multiple attention mechanisms, return the average value from
+all attention mechanisms.
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/get_attention_probs.md b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/get_attention_probs.md
new file mode 100644
index 0000000000..e9e9e1dc25
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/beam_search_decoder/get_attention_probs.md
@@ -0,0 +1,57 @@
+
+
+
+
+
+# tfa.seq2seq.beam_search_decoder.get_attention_probs
+
+
+
+
+
+
+
+
+Get attention probabilities from the cell state.
+
+``` python
+tfa.seq2seq.beam_search_decoder.get_attention_probs(
+ next_cell_state,
+ coverage_penalty_weight
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `next_cell_state`: The next state from the cell, e.g. an instance of
+ AttentionWrapperState if the cell is attentional.
+* `coverage_penalty_weight`: Float weight to penalize the coverage of source
+ sentence. Disabled with 0.0.
+
+
+#### Returns:
+
+The attention probabilities with shape
+ `[batch_size, beam_width, max_time]` if coverage penalty is enabled.
+ Otherwise, returns None.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If no cell is attentional but coverage penalty is enabled.
+
diff --git a/docs/api_docs/python/tfa/seq2seq/decoder.md b/docs/api_docs/python/tfa/seq2seq/decoder.md
new file mode 100644
index 0000000000..09cf86f4f4
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/decoder.md
@@ -0,0 +1,35 @@
+
+
+
+
+
+# Module: tfa.seq2seq.decoder
+
+
+
+
+
+
+Seq2seq layer operations for use in neural networks.
+
+
+
+## Classes
+
+[`class BaseDecoder`](../../tfa/seq2seq/BaseDecoder.md): An RNN Decoder that is based on a Keras layer.
+
+[`class Decoder`](../../tfa/seq2seq/Decoder.md): An RNN Decoder abstract interface object.
+
+## Functions
+
+[`dynamic_decode(...)`](../../tfa/seq2seq/dynamic_decode.md): Perform dynamic decoding with `decoder`.
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/dynamic_decode.md b/docs/api_docs/python/tfa/seq2seq/dynamic_decode.md
new file mode 100644
index 0000000000..f222efb1a4
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/dynamic_decode.md
@@ -0,0 +1,84 @@
+
+
+
+
+
+# tfa.seq2seq.dynamic_decode
+
+
+
+
+
+
+
+
+Perform dynamic decoding with `decoder`.
+
+**Aliases**: `tfa.seq2seq.decoder.dynamic_decode`
+
+``` python
+tfa.seq2seq.dynamic_decode(
+ decoder,
+ output_time_major=False,
+ impute_finished=False,
+ maximum_iterations=None,
+ parallel_iterations=32,
+ swap_memory=False,
+ training=None,
+ scope=None,
+ **kwargs
+)
+```
+
+
+
+
+
+Calls initialize() once and step() repeatedly on the Decoder object.
+
+#### Args:
+
+
+* `decoder`: A `Decoder` instance.
+* `output_time_major`: Python boolean. Default: `False` (batch major). If
+ `True`, outputs are returned as time major tensors (this mode is
+ faster). Otherwise, outputs are returned as batch major tensors (this
+ adds extra time to the computation).
+* `impute_finished`: Python boolean. If `True`, then states for batch
+ entries which are marked as finished get copied through and the
+ corresponding outputs get zeroed out. This causes some slowdown at
+ each time step, but ensures that the final state and outputs have
+ the correct values and that backprop ignores time steps that were
+ marked as finished.
+* `maximum_iterations`: `int32` scalar, maximum allowed number of decoding
+ steps. Default is `None` (decode until the decoder is fully done).
+* `parallel_iterations`: Argument passed to `tf.while_loop`.
+* `swap_memory`: Argument passed to `tf.while_loop`.
+* `training`: Python boolean. Indicates whether the layer should behave
+ in training mode or in inference mode. Only relevant
+ when `dropout` or `recurrent_dropout` is used.
+* `scope`: Optional variable scope to use.
+* `**kwargs`: dict, other keyword arguments for dynamic_decode. It might
+ contain arguments for `BaseDecoder` to initialize, which takes all
+ tensor inputs during call().
+
+
+#### Returns:
+
+`(final_outputs, final_state, final_sequence_lengths)`.
+
+
+
+#### Raises:
+
+
+* `TypeError`: if `decoder` is not an instance of `Decoder`.
+* `ValueError`: if `maximum_iterations` is provided but is not a scalar.
+
diff --git a/docs/api_docs/python/tfa/seq2seq/gather_tree_from_array.md b/docs/api_docs/python/tfa/seq2seq/gather_tree_from_array.md
new file mode 100644
index 0000000000..b2d6205e29
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/gather_tree_from_array.md
@@ -0,0 +1,55 @@
+
+
+
+
+
+# tfa.seq2seq.gather_tree_from_array
+
+
+
+
+
+
+
+
+Calculates the full beams for `TensorArray`s.
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.gather_tree_from_array`
+
+``` python
+tfa.seq2seq.gather_tree_from_array(
+ t,
+ parent_ids,
+ sequence_length
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `t`: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
+ shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
+ where `s` is the depth shape.
+* `parent_ids`: The parent ids of shape `[max_time, batch_size, beam_width]`.
+* `sequence_length`: The sequence length of shape `[batch_size, beam_width]`.
+
+
+#### Returns:
+
+A `Tensor` which is a stacked `TensorArray` of the same size and type as
+`t` and where beams are sorted in each `Tensor` according to
+`parent_ids`.
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/hardmax.md b/docs/api_docs/python/tfa/seq2seq/hardmax.md
new file mode 100644
index 0000000000..add49cd13a
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/hardmax.md
@@ -0,0 +1,49 @@
+
+
+
+
+
+# tfa.seq2seq.hardmax
+
+
+
+
+
+
+
+
+Returns batched one-hot vectors.
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.hardmax`
+
+``` python
+tfa.seq2seq.hardmax(
+ logits,
+ name=None
+)
+```
+
+
+
+
+
+The depth index containing the `1` is that of the maximum logit value.
+
+#### Args:
+
+
+* `logits`: A batch tensor of logit values.
+* `name`: Name to use when creating ops.
+
+#### Returns:
+
+A batched one-hot tensor.
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/loss.md b/docs/api_docs/python/tfa/seq2seq/loss.md
new file mode 100644
index 0000000000..d22a04dce8
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/loss.md
@@ -0,0 +1,33 @@
+
+
+
+
+
+# Module: tfa.seq2seq.loss
+
+
+
+
+
+
+Seq2seq loss operations for use in sequence models.
+
+
+
+## Classes
+
+[`class SequenceLoss`](../../tfa/seq2seq/SequenceLoss.md): Weighted cross-entropy loss for a sequence of logits.
+
+## Functions
+
+[`sequence_loss(...)`](../../tfa/seq2seq/sequence_loss.md): Weighted cross-entropy loss for a sequence of logits.
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/monotonic_attention.md b/docs/api_docs/python/tfa/seq2seq/monotonic_attention.md
new file mode 100644
index 0000000000..6353925fdf
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/monotonic_attention.md
@@ -0,0 +1,83 @@
+
+
+
+
+
+# tfa.seq2seq.monotonic_attention
+
+
+
+
+
+
+
+
+Compute monotonic attention distribution from choosing probabilities.
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.monotonic_attention`
+
+``` python
+tfa.seq2seq.monotonic_attention(
+ p_choose_i,
+ previous_attention,
+ mode
+)
+```
+
+
+
+
+
+Monotonic attention implies that the input sequence is processed in an
+explicitly left-to-right manner when generating the output sequence. In
+addition, once an input sequence element is attended to at a given output
+timestep, elements occurring before it cannot be attended to at subsequent
+output timesteps. This function generates attention distributions
+according to these assumptions. For more information, see `Online and
+Linear-Time Attention by Enforcing Monotonic Alignments`.
+
+#### Args:
+
+
+* `p_choose_i`: Probability of choosing input sequence/memory element i.
+ Should be of shape (batch_size, input_sequence_length), and should all
+ be in the range [0, 1].
+* `previous_attention`: The attention distribution from the previous output
+ timestep. Should be of shape (batch_size, input_sequence_length). For
+  the first output timestep, previous_attention[n] should be
+ [1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1].
+* `mode`: How to compute the attention distribution. Must be one of
+ 'recursive', 'parallel', or 'hard'.
+ * 'recursive' uses tf.scan to recursively compute the distribution.
+ This is slowest but is exact, general, and does not suffer from
+ numerical instabilities.
+ * 'parallel' uses parallelized cumulative-sum and cumulative-product
+ operations to compute a closed-form solution to the recurrence
+ relation defining the attention distribution. This makes it more
+ efficient than 'recursive', but it requires numerical checks which
+ make the distribution non-exact. This can be a problem in
+ particular when input_sequence_length is long and/or p_choose_i has
+ entries very close to 0 or 1.
+ * 'hard' requires that the probabilities in p_choose_i are all either
+ 0 or 1, and subsequently uses a more efficient and exact solution.
+
+
+#### Returns:
+
+A tensor of shape (batch_size, input_sequence_length) representing the
+attention distributions for each sequence in the batch.
+
+
+
+#### Raises:
+
+
+* `ValueError`: mode is not one of 'recursive', 'parallel', 'hard'.
+
diff --git a/docs/api_docs/python/tfa/seq2seq/safe_cumprod.md b/docs/api_docs/python/tfa/seq2seq/safe_cumprod.md
new file mode 100644
index 0000000000..d7f1d65e51
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/safe_cumprod.md
@@ -0,0 +1,55 @@
+
+
+
+
+
+# tfa.seq2seq.safe_cumprod
+
+
+
+
+
+
+
+
+Computes cumprod of x in logspace using cumsum to avoid underflow.
+
+**Aliases**: `tfa.seq2seq.attention_wrapper.safe_cumprod`
+
+``` python
+tfa.seq2seq.safe_cumprod(
+ x,
+ *args,
+ **kwargs
+)
+```
+
+
+
+
+
+The cumprod function and its gradient can result in numerical instabilities
+when its argument has very small and/or zero values. As long as the
+argument is all positive, we can instead compute the cumulative product as
+exp(cumsum(log(x))). This function can be called identically to
+tf.cumprod.
+
+#### Args:
+
+
+* `x`: Tensor to take the cumulative product of.
+* `*args`: Passed on to cumsum; these are identical to those in cumprod.
+* `**kwargs`: Passed on to cumsum; these are identical to those in cumprod.
+
+#### Returns:
+
+Cumulative product of x.
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/sampler.md b/docs/api_docs/python/tfa/seq2seq/sampler.md
new file mode 100644
index 0000000000..220e458c74
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/sampler.md
@@ -0,0 +1,49 @@
+
+
+
+
+
+# Module: tfa.seq2seq.sampler
+
+
+
+
+
+
+A library of sampler for use with SamplingDecoders.
+
+
+
+## Classes
+
+[`class CustomSampler`](../../tfa/seq2seq/CustomSampler.md): Base abstract class that allows the user to customize sampling.
+
+[`class GreedyEmbeddingSampler`](../../tfa/seq2seq/GreedyEmbeddingSampler.md): A sampler for use during inference.
+
+[`class InferenceSampler`](../../tfa/seq2seq/InferenceSampler.md): A helper to use during inference with a custom sampling function.
+
+[`class SampleEmbeddingSampler`](../../tfa/seq2seq/SampleEmbeddingSampler.md): A sampler for use during inference.
+
+[`class Sampler`](../../tfa/seq2seq/Sampler.md): Interface for implementing sampling in seq2seq decoders.
+
+[`class ScheduledEmbeddingTrainingSampler`](../../tfa/seq2seq/ScheduledEmbeddingTrainingSampler.md): A training sampler that adds scheduled sampling.
+
+[`class ScheduledOutputTrainingSampler`](../../tfa/seq2seq/ScheduledOutputTrainingSampler.md): A training sampler that adds scheduled sampling directly to outputs.
+
+[`class TrainingSampler`](../../tfa/seq2seq/TrainingSampler.md): A Sampler for use during training.
+
+## Functions
+
+[`bernoulli_sample(...)`](../../tfa/seq2seq/sampler/bernoulli_sample.md): Samples from Bernoulli distribution.
+
+[`categorical_sample(...)`](../../tfa/seq2seq/sampler/categorical_sample.md): Samples from categorical distribution.
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/sampler/bernoulli_sample.md b/docs/api_docs/python/tfa/seq2seq/sampler/bernoulli_sample.md
new file mode 100644
index 0000000000..13764b348a
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/sampler/bernoulli_sample.md
@@ -0,0 +1,38 @@
+
+
+
+
+
+# tfa.seq2seq.sampler.bernoulli_sample
+
+
+
+
+
+
+
+
+Samples from Bernoulli distribution.
+
+``` python
+tfa.seq2seq.sampler.bernoulli_sample(
+ probs=None,
+ logits=None,
+ dtype=tf.int32,
+ sample_shape=(),
+ seed=None
+)
+```
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/sampler/categorical_sample.md b/docs/api_docs/python/tfa/seq2seq/sampler/categorical_sample.md
new file mode 100644
index 0000000000..75fcd050d7
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/sampler/categorical_sample.md
@@ -0,0 +1,37 @@
+
+
+
+
+
+# tfa.seq2seq.sampler.categorical_sample
+
+
+
+
+
+
+
+
+Samples from categorical distribution.
+
+``` python
+tfa.seq2seq.sampler.categorical_sample(
+ logits,
+ dtype=tf.int32,
+ sample_shape=(),
+ seed=None
+)
+```
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/seq2seq/sequence_loss.md b/docs/api_docs/python/tfa/seq2seq/sequence_loss.md
new file mode 100644
index 0000000000..7ca79ab80a
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/sequence_loss.md
@@ -0,0 +1,105 @@
+
+
+
+
+
+# tfa.seq2seq.sequence_loss
+
+
+
+
+
+
+
+
+Weighted cross-entropy loss for a sequence of logits.
+
+**Aliases**: `tfa.seq2seq.loss.sequence_loss`
+
+``` python
+tfa.seq2seq.sequence_loss(
+ logits,
+ targets,
+ weights,
+ average_across_timesteps=True,
+ average_across_batch=True,
+ sum_over_timesteps=False,
+ sum_over_batch=False,
+ softmax_loss_function=None,
+ name=None
+)
+```
+
+
+
+
+
+Depending on the values of `average_across_timesteps` /
+`sum_over_timesteps` and `average_across_batch` / `sum_over_batch`, the
+return Tensor will have rank 0, 1, or 2 as these arguments reduce the
+cross-entropy at each target, which has shape
+`[batch_size, sequence_length]`, over their respective dimensions. For
+example, if `average_across_timesteps` is `True` and `average_across_batch`
+is `False`, then the return Tensor will have shape `[batch_size]`.
+
+Note that `average_across_timesteps` and `sum_over_timesteps` cannot be
+True at the same time. Same for `average_across_batch` and `sum_over_batch`.
+
+The recommended loss reduction in tf 2.0 has been changed to sum_over,
+instead of weighted average. Users are recommended to use `sum_over_timesteps`
+and `sum_over_batch` for reduction.
+
+#### Args:
+
+
+* `logits`: A Tensor of shape
+ `[batch_size, sequence_length, num_decoder_symbols]` and dtype float.
+ The logits correspond to the prediction across all classes at each
+ timestep.
+* `targets`: A Tensor of shape `[batch_size, sequence_length]` and dtype
+ int. The target represents the true class at each timestep.
+* `weights`: A Tensor of shape `[batch_size, sequence_length]` and dtype
+ float. `weights` constitutes the weighting of each prediction in the
+ sequence. When using `weights` as masking, set all valid timesteps to 1
+ and all padded timesteps to 0, e.g. a mask returned by
+ `tf.sequence_mask`.
+* `average_across_timesteps`: If set, sum the cost across the sequence
+ dimension and divide the cost by the total label weight across
+ timesteps.
+* `average_across_batch`: If set, sum the cost across the batch dimension and
+ divide the returned cost by the batch size.
+* `sum_over_timesteps`: If set, sum the cost across the sequence dimension
+ and divide by the size of the sequence. Note that any element with 0
+ weights will be excluded from size calculation.
+* `sum_over_batch`: If set, sum the cost across the batch dimension and
+ divide the total cost by the batch size. Note that any element with 0
+ weights will be excluded from size calculation.
+* `softmax_loss_function`: Function (labels, logits) -> loss-batch
+ to be used instead of the standard softmax (the default if this is
+ None). **Note that to avoid confusion, it is required for the function
+ to accept named arguments.**
+* `name`: Optional name for this operation, defaults to "sequence_loss".
+
+
+#### Returns:
+
+A float Tensor of rank 0, 1, or 2 depending on the
+`average_across_timesteps` and `average_across_batch` arguments. By
+default, it has rank 0 (scalar) and is the weighted average cross-entropy
+(log-perplexity) per symbol.
+
+
+
+#### Raises:
+
+
+* `ValueError`: logits does not have 3 dimensions or targets does not have 2
+ dimensions or weights does not have 2 dimensions.
+
diff --git a/docs/api_docs/python/tfa/seq2seq/tile_batch.md b/docs/api_docs/python/tfa/seq2seq/tile_batch.md
new file mode 100644
index 0000000000..aff96305f4
--- /dev/null
+++ b/docs/api_docs/python/tfa/seq2seq/tile_batch.md
@@ -0,0 +1,66 @@
+
+
+
+
+
+# tfa.seq2seq.tile_batch
+
+
+
+
+
+
+
+
+Tile the batch dimension of a (possibly nested structure of) tensor(s)
+
+**Aliases**: `tfa.seq2seq.beam_search_decoder.tile_batch`
+
+``` python
+tfa.seq2seq.tile_batch(
+ t,
+ multiplier,
+ name=None
+)
+```
+
+
+
+
+t.
+
+For each tensor t in a (possibly nested structure) of tensors,
+this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed
+of minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a
+shape `[batch_size * multiplier, s0, s1, ...]` composed of minibatch
+entries `t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is
+repeated `multiplier` times.
+
+#### Args:
+
+
+* `t`: `Tensor` shaped `[batch_size, ...]`.
+* `multiplier`: Python int.
+* `name`: Name scope for any created operations.
+
+
+#### Returns:
+
+A (possibly nested structure of) `Tensor` shaped
+`[batch_size * multiplier, ...]`.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if tensor(s) `t` do not have a statically known rank or
+the rank is < 1.
+
diff --git a/docs/api_docs/python/tfa/text.md b/docs/api_docs/python/tfa/text.md
new file mode 100644
index 0000000000..07711ec029
--- /dev/null
+++ b/docs/api_docs/python/tfa/text.md
@@ -0,0 +1,63 @@
+
+
+
+
+
+# Module: tfa.text
+
+
+
+
+
+
+Additional text-processing ops.
+
+
+
+## Modules
+
+[`crf`](../tfa/text/crf.md) module
+
+[`parse_time_op`](../tfa/text/parse_time_op.md) module: Parse time ops.
+
+[`skip_gram_ops`](../tfa/text/skip_gram_ops.md) module: Skip-gram sampling ops from https://arxiv.org/abs/1301.3781.
+
+## Functions
+
+[`crf_binary_score(...)`](../tfa/text/crf_binary_score.md): Computes the binary scores of tag sequences.
+
+[`crf_decode(...)`](../tfa/text/crf_decode.md): Decode the highest scoring sequence of tags.
+
+[`crf_decode_backward(...)`](../tfa/text/crf_decode_backward.md): Computes backward decoding in a linear-chain CRF.
+
+[`crf_decode_forward(...)`](../tfa/text/crf_decode_forward.md): Computes forward decoding in a linear-chain CRF.
+
+[`crf_forward(...)`](../tfa/text/crf_forward.md): Computes the alpha values in a linear-chain CRF.
+
+[`crf_log_likelihood(...)`](../tfa/text/crf_log_likelihood.md): Computes the log-likelihood of tag sequences in a CRF.
+
+[`crf_log_norm(...)`](../tfa/text/crf_log_norm.md): Computes the normalization for a CRF.
+
+[`crf_multitag_sequence_score(...)`](../tfa/text/crf_multitag_sequence_score.md): Computes the unnormalized score of all tag sequences matching
+
+[`crf_sequence_score(...)`](../tfa/text/crf_sequence_score.md): Computes the unnormalized score for a tag sequence.
+
+[`crf_unary_score(...)`](../tfa/text/crf_unary_score.md): Computes the unary scores of tag sequences.
+
+[`parse_time(...)`](../tfa/text/parse_time.md): Parse an input string according to the provided format string into a
+
+[`skip_gram_sample(...)`](../tfa/text/skip_gram_sample.md): Generates skip-gram token and label paired Tensors from the input
+
+[`skip_gram_sample_with_text_vocab(...)`](../tfa/text/skip_gram_sample_with_text_vocab.md): Skip-gram sampling with a text vocabulary file.
+
+[`viterbi_decode(...)`](../tfa/text/viterbi_decode.md): Decode the highest scoring sequence of tags outside of TensorFlow.
+
+
+
diff --git a/docs/api_docs/python/tfa/text/crf.md b/docs/api_docs/python/tfa/text/crf.md
new file mode 100644
index 0000000000..b67d6e5b72
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf.md
@@ -0,0 +1,53 @@
+
+
+
+
+
+# Module: tfa.text.crf
+
+
+
+
+
+
+
+
+
+
+## Classes
+
+[`class CrfDecodeForwardRnnCell`](../../tfa/text/crf/CrfDecodeForwardRnnCell.md): Computes the forward decoding in a linear-chain CRF.
+
+## Functions
+
+[`crf_binary_score(...)`](../../tfa/text/crf_binary_score.md): Computes the binary scores of tag sequences.
+
+[`crf_decode(...)`](../../tfa/text/crf_decode.md): Decode the highest scoring sequence of tags.
+
+[`crf_decode_backward(...)`](../../tfa/text/crf_decode_backward.md): Computes backward decoding in a linear-chain CRF.
+
+[`crf_decode_forward(...)`](../../tfa/text/crf_decode_forward.md): Computes forward decoding in a linear-chain CRF.
+
+[`crf_forward(...)`](../../tfa/text/crf_forward.md): Computes the alpha values in a linear-chain CRF.
+
+[`crf_log_likelihood(...)`](../../tfa/text/crf_log_likelihood.md): Computes the log-likelihood of tag sequences in a CRF.
+
+[`crf_log_norm(...)`](../../tfa/text/crf_log_norm.md): Computes the normalization for a CRF.
+
+[`crf_multitag_sequence_score(...)`](../../tfa/text/crf_multitag_sequence_score.md): Computes the unnormalized score of all tag sequences matching
+
+[`crf_sequence_score(...)`](../../tfa/text/crf_sequence_score.md): Computes the unnormalized score for a tag sequence.
+
+[`crf_unary_score(...)`](../../tfa/text/crf_unary_score.md): Computes the unary scores of tag sequences.
+
+[`viterbi_decode(...)`](../../tfa/text/viterbi_decode.md): Decode the highest scoring sequence of tags outside of TensorFlow.
+
+
+
diff --git a/docs/api_docs/python/tfa/text/crf/CrfDecodeForwardRnnCell.md b/docs/api_docs/python/tfa/text/crf/CrfDecodeForwardRnnCell.md
new file mode 100644
index 0000000000..a58f94f16d
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf/CrfDecodeForwardRnnCell.md
@@ -0,0 +1,866 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# tfa.text.crf.CrfDecodeForwardRnnCell
+
+
+
+
+
+
+
+
+## Class `CrfDecodeForwardRnnCell`
+
+Computes the forward decoding in a linear-chain CRF.
+
+
+
+
+
+
+__init__
+
+View source
+
+``` python
+__init__(
+ transition_params,
+ **kwargs
+)
+```
+
+Initialize the CrfDecodeForwardRnnCell.
+
+
+#### Args:
+
+
+* `transition_params`: A [num_tags, num_tags] matrix of binary
+ potentials. This matrix is expanded into a
+ [1, num_tags, num_tags] in preparation for the broadcast
+ summation occurring within the cell.
+
+
+
+## Properties
+
+activity_regularizer
+
+Optional regularizer function for the output of this layer.
+
+
+dtype
+
+
+
+
+dynamic
+
+
+
+
+
+
+Retrieves the input tensor(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input tensor or list of input tensors.
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+* `AttributeError`: If no inbound nodes are found.
+
+
+
+Retrieves the input mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Input mask tensor (potentially None) or list of input
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+
+
+Retrieves the input shape(s) of a layer.
+
+Only applicable if the layer has exactly one input,
+i.e. if it is connected to one incoming layer, or if all inputs
+have the same shape.
+
+#### Returns:
+
+Input shape, as an integer shape tuple
+(or list of shape tuples, one tuple per input tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined input_shape.
+* `RuntimeError`: if called in Eager mode.
+
+
+
+
+
+
+losses
+
+Losses which are associated with this `Layer`.
+
+Variable regularization tensors are created when this property is accessed,
+so it is eager safe: accessing `losses` under a `tf.GradientTape` will
+propagate gradients back to the corresponding variables.
+
+#### Returns:
+
+A list of tensors.
+
+
+metrics
+
+
+
+
+name
+
+Returns the name of this module as passed or determined in the ctor.
+
+NOTE: This is not the same as the `self.name_scope.name` which includes
+parent module names.
+
+name_scope
+
+Returns a `tf.name_scope` instance for this class.
+
+
+non_trainable_variables
+
+
+
+
+non_trainable_weights
+
+
+
+
+output
+
+Retrieves the output tensor(s) of a layer.
+
+Only applicable if the layer has exactly one output,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output tensor or list of output tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to more than one incoming
+ layers.
+* `RuntimeError`: if called in Eager mode.
+
+output_mask
+
+Retrieves the output mask tensor(s) of a layer.
+
+Only applicable if the layer has exactly one inbound node,
+i.e. if it is connected to one incoming layer.
+
+#### Returns:
+
+Output mask tensor (potentially None) or list of output
+mask tensors.
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer is connected to
+more than one incoming layers.
+
+output_shape
+
+Retrieves the output shape(s) of a layer.
+
+Only applicable if the layer has one output,
+or if all outputs have the same shape.
+
+#### Returns:
+
+Output shape, as an integer shape tuple
+(or list of shape tuples, one tuple per output tensor).
+
+
+
+#### Raises:
+
+
+* `AttributeError`: if the layer has no defined output shape.
+* `RuntimeError`: if called in Eager mode.
+
+output_size
+
+Integer or TensorShape: size of outputs produced by this cell.
+
+
+state_size
+
+size(s) of state(s) used by this cell.
+
+It can be represented by an Integer, a TensorShape or a tuple of Integers
+or TensorShapes.
+
+submodules
+
+Sequence of all sub-modules.
+
+Submodules are modules which are properties of this module, or found as
+properties of modules which are properties of this module (and so on).
+
+```
+a = tf.Module()
+b = tf.Module()
+c = tf.Module()
+a.b = b
+b.c = c
+assert list(a.submodules) == [b, c]
+assert list(b.submodules) == [c]
+assert list(c.submodules) == []
+```
+
+#### Returns:
+
+A sequence of all submodules.
+
+
+trainable
+
+
+
+
+trainable_variables
+
+Sequence of trainable variables owned by this module and its submodules.
+
+Note: this method uses reflection to find variables on the current instance
+and submodules. For performance reasons you may wish to cache the result
+of calling this method if you don't expect the return value to change.
+
+#### Returns:
+
+A sequence of variables for the current module (sorted by attribute
+name) followed by variables from all submodules recursively (breadth
+first).
+
+
+trainable_weights
+
+
+
+
+updates
+
+
+
+
+variables
+
+Returns the list of all layer variables/weights.
+
+Alias of `self.weights`.
+
+#### Returns:
+
+A list of variables.
+
+
+weights
+
+Returns the list of all layer variables/weights.
+
+
+#### Returns:
+
+A list of variables.
+
+
+
+
+## Methods
+
+__call__
+
+``` python
+__call__(
+ inputs,
+ *args,
+ **kwargs
+)
+```
+
+Wraps `call`, applying pre- and post-processing steps.
+
+
+#### Arguments:
+
+
+* `inputs`: input tensor(s).
+* `*args`: additional positional arguments to be passed to `self.call`.
+* `**kwargs`: additional keyword arguments to be passed to `self.call`.
+
+
+#### Returns:
+
+Output tensor(s).
+
+
+
+#### Note:
+
+- The following optional keyword arguments are reserved for specific uses:
+ * `training`: Boolean scalar tensor of Python boolean indicating
+ whether the `call` is meant for training or inference.
+ * `mask`: Boolean input mask.
+- If the layer's `call` method takes a `mask` argument (as some Keras
+ layers do), its default value will be set to the mask generated
+ for `inputs` by the previous layer (if `input` did come from
+ a layer that generated a corresponding mask, i.e. if it came from
+ a Keras layer with masking support.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer's `call` method returns None (an invalid value).
+
+build
+
+View source
+
+``` python
+build(input_shape)
+```
+
+Creates the variables of the layer (optional, for subclass implementers).
+
+This is a method that implementers of subclasses of `Layer` or `Model`
+can override if they need a state-creation step in-between
+layer instantiation and layer call.
+
+This is typically used to create the weights of `Layer` subclasses.
+
+#### Arguments:
+
+
+* `input_shape`: Instance of `TensorShape`, or list of instances of
+ `TensorShape` if the layer expects a list of inputs
+ (one instance per input).
+
+compute_mask
+
+``` python
+compute_mask(
+ inputs,
+ mask=None
+)
+```
+
+Computes an output mask tensor.
+
+
+#### Arguments:
+
+
+* `inputs`: Tensor or list of tensors.
+* `mask`: Tensor or list of tensors.
+
+
+#### Returns:
+
+None or a tensor (or list of tensors,
+ one per output tensor of the layer).
+
+
+compute_output_shape
+
+``` python
+compute_output_shape(input_shape)
+```
+
+Computes the output shape of the layer.
+
+If the layer has not been built, this method will call `build` on the
+layer. This assumes that the layer will later be used with inputs that
+match the input shape provided here.
+
+#### Arguments:
+
+
+* `input_shape`: Shape tuple (tuple of integers)
+ or list of shape tuples (one per output tensor of the layer).
+ Shape tuples can include None for free dimensions,
+ instead of an integer.
+
+
+#### Returns:
+
+An input shape tuple.
+
+
+count_params
+
+``` python
+count_params()
+```
+
+Count the total number of scalars composing the weights.
+
+
+#### Returns:
+
+An integer count.
+
+
+
+#### Raises:
+
+
+* `ValueError`: if the layer isn't yet built
+ (in which case its weights aren't yet defined).
+
+from_config
+
+``` python
+from_config(
+ cls,
+ config
+)
+```
+
+Creates a layer from its config.
+
+This method is the reverse of `get_config`,
+capable of instantiating the same layer from the config
+dictionary. It does not handle layer connectivity
+(handled by Network), nor weights (handled by `set_weights`).
+
+#### Arguments:
+
+
+* `config`: A Python dictionary, typically the
+ output of get_config.
+
+
+#### Returns:
+
+A layer instance.
+
+
+get_config
+
+``` python
+get_config()
+```
+
+Returns the config of the layer.
+
+A layer config is a Python dictionary (serializable)
+containing the configuration of a layer.
+The same layer can be reinstantiated later
+(without its trained weights) from this configuration.
+
+The config of a layer does not include connectivity
+information, nor the layer class name. These are handled
+by `Network` (one layer of abstraction above).
+
+#### Returns:
+
+Python dictionary.
+
+
+get_initial_state
+
+``` python
+get_initial_state(
+ inputs=None,
+ batch_size=None,
+ dtype=None
+)
+```
+
+
+
+
+
+
+``` python
+get_input_at(node_index)
+```
+
+Retrieves the input tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+
+
+``` python
+get_input_mask_at(node_index)
+```
+
+Retrieves the input mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple inputs).
+
+
+
+
+``` python
+get_input_shape_at(node_index)
+```
+
+Retrieves the input shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple inputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_losses_for
+
+``` python
+get_losses_for(inputs)
+```
+
+Retrieves losses relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of loss tensors of the layer that depend on `inputs`.
+
+
+get_output_at
+
+``` python
+get_output_at(node_index)
+```
+
+Retrieves the output tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A tensor (or list of tensors if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_output_mask_at
+
+``` python
+get_output_mask_at(node_index)
+```
+
+Retrieves the output mask tensor(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A mask tensor
+(or list of tensors if the layer has multiple outputs).
+
+
+get_output_shape_at
+
+``` python
+get_output_shape_at(node_index)
+```
+
+Retrieves the output shape(s) of a layer at a given node.
+
+
+#### Arguments:
+
+
+* `node_index`: Integer, index of the node
+ from which to retrieve the attribute.
+ E.g. `node_index=0` will correspond to the
+ first time the layer was called.
+
+
+#### Returns:
+
+A shape tuple
+(or list of shape tuples if the layer has multiple outputs).
+
+
+
+#### Raises:
+
+
+* `RuntimeError`: If called in Eager mode.
+
+get_updates_for
+
+``` python
+get_updates_for(inputs)
+```
+
+Retrieves updates relevant to a specific set of inputs.
+
+
+#### Arguments:
+
+
+* `inputs`: Input tensor or list/tuple of input tensors.
+
+
+#### Returns:
+
+List of update ops of the layer that depend on `inputs`.
+
+
+get_weights
+
+``` python
+get_weights()
+```
+
+Returns the current weights of the layer.
+
+
+#### Returns:
+
+Weights values as a list of numpy arrays.
+
+
+set_weights
+
+``` python
+set_weights(weights)
+```
+
+Sets the weights of the layer, from Numpy arrays.
+
+
+#### Arguments:
+
+
+* `weights`: a list of Numpy arrays. The number
+ of arrays and their shape must match
+ number of the dimensions of the weights
+ of the layer (i.e. it should match the
+ output of `get_weights`).
+
+
+#### Raises:
+
+
+* `ValueError`: If the provided weights list does not match the
+ layer's specifications.
+
+with_name_scope
+
+``` python
+with_name_scope(
+ cls,
+ method
+)
+```
+
+Decorator to automatically enter the module name scope.
+
+```
+class MyModule(tf.Module):
+ @tf.Module.with_name_scope
+ def __call__(self, x):
+ if not hasattr(self, 'w'):
+ self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
+ return tf.matmul(x, self.w)
+```
+
+Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
+names included the module name:
+
+```
+mod = MyModule()
+mod(tf.ones([8, 32]))
+# ==>
+mod.w
+# ==>
+```
+
+#### Args:
+
+
+* `method`: The method to wrap.
+
+
+#### Returns:
+
+The original method wrapped such that it enters the module's name scope.
+
+
+
+
+
+
diff --git a/docs/api_docs/python/tfa/text/crf_binary_score.md b/docs/api_docs/python/tfa/text/crf_binary_score.md
new file mode 100644
index 0000000000..629d79d7fe
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_binary_score.md
@@ -0,0 +1,50 @@
+
+
+
+
+
+# tfa.text.crf_binary_score
+
+
+
+
+
+
+
+
+Computes the binary scores of tag sequences.
+
+**Aliases**: `tfa.text.crf.crf_binary_score`
+
+``` python
+tfa.text.crf_binary_score(
+ tag_indices,
+ sequence_lengths,
+ transition_params
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `tag_indices`: A [batch_size, max_seq_len] matrix of tag indices.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `transition_params`: A [num_tags, num_tags] matrix of binary potentials.
+
+#### Returns:
+
+
+* `binary_scores`: A [batch_size] vector of binary scores.
+
diff --git a/docs/api_docs/python/tfa/text/crf_decode.md b/docs/api_docs/python/tfa/text/crf_decode.md
new file mode 100644
index 0000000000..e0859ee69a
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_decode.md
@@ -0,0 +1,55 @@
+
+
+
+
+
+# tfa.text.crf_decode
+
+
+
+
+
+
+
+
+Decode the highest scoring sequence of tags.
+
+**Aliases**: `tfa.text.crf.crf_decode`
+
+``` python
+tfa.text.crf_decode(
+ potentials,
+ transition_params,
+ sequence_length
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `potentials`: A [batch_size, max_seq_len, num_tags] tensor of
+ unary potentials.
+* `transition_params`: A [num_tags, num_tags] matrix of
+ binary potentials.
+* `sequence_length`: A [batch_size] vector of true sequence lengths.
+
+
+#### Returns:
+
+
+* `decode_tags`: A [batch_size, max_seq_len] matrix, with dtype `tf.int32`.
+ Contains the highest scoring tag indices.
+* `best_score`: A [batch_size] vector, containing the score of `decode_tags`.
+
diff --git a/docs/api_docs/python/tfa/text/crf_decode_backward.md b/docs/api_docs/python/tfa/text/crf_decode_backward.md
new file mode 100644
index 0000000000..242268d178
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_decode_backward.md
@@ -0,0 +1,51 @@
+
+
+
+
+
+# tfa.text.crf_decode_backward
+
+
+
+
+
+
+
+
+Computes backward decoding in a linear-chain CRF.
+
+**Aliases**: `tfa.text.crf.crf_decode_backward`
+
+``` python
+tfa.text.crf_decode_backward(
+ inputs,
+ state
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `inputs`: A [batch_size, num_tags] matrix of
+ backpointer of next step (in time order).
+* `state`: A [batch_size, 1] matrix of tag index of next step.
+
+
+#### Returns:
+
+
+* `new_tags`: A [batch_size, num_tags]
+ tensor containing the new tag indices.
+
diff --git a/docs/api_docs/python/tfa/text/crf_decode_forward.md b/docs/api_docs/python/tfa/text/crf_decode_forward.md
new file mode 100644
index 0000000000..3ea916fb37
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_decode_forward.md
@@ -0,0 +1,55 @@
+
+
+
+
+
+# tfa.text.crf_decode_forward
+
+
+
+
+
+
+
+
+Computes forward decoding in a linear-chain CRF.
+
+**Aliases**: `tfa.text.crf.crf_decode_forward`
+
+``` python
+tfa.text.crf_decode_forward(
+ inputs,
+ state,
+ transition_params,
+ sequence_lengths
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `inputs`: A [batch_size, num_tags] matrix of unary potentials.
+* `state`: A [batch_size, num_tags] matrix containing the previous step's
+ score values.
+* `transition_params`: A [num_tags, num_tags] matrix of binary potentials.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+
+
+#### Returns:
+
+
+* `backpointers`: A [batch_size, num_tags] matrix of backpointers.
+* `new_state`: A [batch_size, num_tags] matrix of new score values.
+
diff --git a/docs/api_docs/python/tfa/text/crf_forward.md b/docs/api_docs/python/tfa/text/crf_forward.md
new file mode 100644
index 0000000000..51af79dbea
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_forward.md
@@ -0,0 +1,58 @@
+
+
+
+
+
+# tfa.text.crf_forward
+
+
+
+
+
+
+
+
+Computes the alpha values in a linear-chain CRF.
+
+**Aliases**: `tfa.text.crf.crf_forward`
+
+``` python
+tfa.text.crf_forward(
+ inputs,
+ state,
+ transition_params,
+ sequence_lengths
+)
+```
+
+
+
+
+
+See http://www.cs.columbia.edu/~mcollins/fb.pdf for reference.
+
+#### Args:
+
+
+* `inputs`: A [batch_size, num_tags] matrix of unary potentials.
+* `state`: A [batch_size, num_tags] matrix containing the previous alpha
+ values.
+* `transition_params`: A [num_tags, num_tags] matrix of binary potentials.
+ This matrix is expanded into a [1, num_tags, num_tags] in preparation
+ for the broadcast summation occurring within the cell.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+
+
+#### Returns:
+
+
+* `new_alphas`: A [batch_size, num_tags] matrix containing the
+ new alpha values.
+
diff --git a/docs/api_docs/python/tfa/text/crf_log_likelihood.md b/docs/api_docs/python/tfa/text/crf_log_likelihood.md
new file mode 100644
index 0000000000..94110d1557
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_log_likelihood.md
@@ -0,0 +1,58 @@
+
+
+
+
+
+# tfa.text.crf_log_likelihood
+
+
+
+
+
+
+
+
+Computes the log-likelihood of tag sequences in a CRF.
+
+**Aliases**: `tfa.text.crf.crf_log_likelihood`
+
+``` python
+tfa.text.crf_log_likelihood(
+ inputs,
+ tag_indices,
+ sequence_lengths,
+ transition_params=None
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `inputs`: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
+ to use as input to the CRF layer.
+* `tag_indices`: A [batch_size, max_seq_len] matrix of tag indices for which
+ we compute the log-likelihood.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `transition_params`: A [num_tags, num_tags] transition matrix,
+ if available.
+
+#### Returns:
+
+
+* `log_likelihood`: A [batch_size] `Tensor` containing the log-likelihood of
+ each example, given the sequence of tag indices.
+* `transition_params`: A [num_tags, num_tags] transition matrix. This is
+ either provided by the caller or created in this function.
+
diff --git a/docs/api_docs/python/tfa/text/crf_log_norm.md b/docs/api_docs/python/tfa/text/crf_log_norm.md
new file mode 100644
index 0000000000..6fa82f3eb0
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_log_norm.md
@@ -0,0 +1,51 @@
+
+
+
+
+
+# tfa.text.crf_log_norm
+
+
+
+
+
+
+
+
+Computes the normalization for a CRF.
+
+**Aliases**: `tfa.text.crf.crf_log_norm`
+
+``` python
+tfa.text.crf_log_norm(
+ inputs,
+ sequence_lengths,
+ transition_params
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `inputs`: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
+ to use as input to the CRF layer.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `transition_params`: A [num_tags, num_tags] transition matrix.
+
+#### Returns:
+
+
+* `log_norm`: A [batch_size] vector of normalizers for a CRF.
+
diff --git a/docs/api_docs/python/tfa/text/crf_multitag_sequence_score.md b/docs/api_docs/python/tfa/text/crf_multitag_sequence_score.md
new file mode 100644
index 0000000000..3c4e5864ef
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_multitag_sequence_score.md
@@ -0,0 +1,63 @@
+
+
+
+
+
+# tfa.text.crf_multitag_sequence_score
+
+
+
+
+
+
+
+
+Computes the unnormalized score of all tag sequences matching
+
+**Aliases**: `tfa.text.crf.crf_multitag_sequence_score`
+
+``` python
+tfa.text.crf_multitag_sequence_score(
+ inputs,
+ tag_bitmap,
+ sequence_lengths,
+ transition_params
+)
+```
+
+
+
+
+tag_bitmap.
+
+tag_bitmap enables more than one tag to be considered correct at each time
+step. This is useful when an observed output at a given time step is
+consistent with more than one tag, and thus the log likelihood of that
+observation must take into account all possible consistent tags.
+
+Using one-hot vectors in tag_bitmap gives results identical to
+crf_sequence_score.
+
+#### Args:
+
+
+* `inputs`: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
+ to use as input to the CRF layer.
+* `tag_bitmap`: A [batch_size, max_seq_len, num_tags] boolean tensor
+ representing all active tags at each index for which to calculate the
+ unnormalized score.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `transition_params`: A [num_tags, num_tags] transition matrix.
+
+#### Returns:
+
+
+* `sequence_scores`: A [batch_size] vector of unnormalized sequence scores.
+
diff --git a/docs/api_docs/python/tfa/text/crf_sequence_score.md b/docs/api_docs/python/tfa/text/crf_sequence_score.md
new file mode 100644
index 0000000000..a4ae6f99b5
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_sequence_score.md
@@ -0,0 +1,54 @@
+
+
+
+
+
+# tfa.text.crf_sequence_score
+
+
+
+
+
+
+
+
+Computes the unnormalized score for a tag sequence.
+
+**Aliases**: `tfa.text.crf.crf_sequence_score`
+
+``` python
+tfa.text.crf_sequence_score(
+ inputs,
+ tag_indices,
+ sequence_lengths,
+ transition_params
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `inputs`: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
+ to use as input to the CRF layer.
+* `tag_indices`: A [batch_size, max_seq_len] matrix of tag indices for which
+ we compute the unnormalized score.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `transition_params`: A [num_tags, num_tags] transition matrix.
+
+#### Returns:
+
+
+* `sequence_scores`: A [batch_size] vector of unnormalized sequence scores.
+
diff --git a/docs/api_docs/python/tfa/text/crf_unary_score.md b/docs/api_docs/python/tfa/text/crf_unary_score.md
new file mode 100644
index 0000000000..f6ebbe584c
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/crf_unary_score.md
@@ -0,0 +1,50 @@
+
+
+
+
+
+# tfa.text.crf_unary_score
+
+
+
+
+
+
+
+
+Computes the unary scores of tag sequences.
+
+**Aliases**: `tfa.text.crf.crf_unary_score`
+
+``` python
+tfa.text.crf_unary_score(
+ tag_indices,
+ sequence_lengths,
+ inputs
+)
+```
+
+
+
+
+
+
+#### Args:
+
+
+* `tag_indices`: A [batch_size, max_seq_len] matrix of tag indices.
+* `sequence_lengths`: A [batch_size] vector of true sequence lengths.
+* `inputs`: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.
+
+#### Returns:
+
+
+* `unary_scores`: A [batch_size] vector of unary scores.
+
diff --git a/docs/api_docs/python/tfa/text/parse_time.md b/docs/api_docs/python/tfa/text/parse_time.md
new file mode 100644
index 0000000000..f05039434a
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/parse_time.md
@@ -0,0 +1,97 @@
+
+
+
+
+
+# tfa.text.parse_time
+
+
+
+
+
+
+
+
+Parse an input string according to the provided format string into a
+
+**Aliases**: `tfa.text.parse_time_op.parse_time`
+
+``` python
+tfa.text.parse_time(
+ time_string,
+ time_format,
+ output_unit
+)
+```
+
+
+
+
+Unix time.
+
+Parse an input string according to the provided format string into a Unix
+time, the number of seconds / milliseconds / microseconds / nanoseconds
+elapsed since January 1, 1970 UTC.
+
+Uses strftime()-like formatting options, with the same extensions as
+FormatTime(), but with the exceptions that %E#S is interpreted as %E*S, and
+%E#f as %E*f. %Ez and %E*z also accept the same inputs.
+
+%Y consumes as many numeric characters as it can, so the matching
+data should always be terminated with a non-numeric. %E4Y always
+consumes exactly four characters, including any sign.
+
+Unspecified fields are taken from the default date and time of ...
+
+ "1970-01-01 00:00:00.0 +0000"
+
+For example, parsing a string of "15:45" (%H:%M) will return an
+Unix time that represents "1970-01-01 15:45:00.0 +0000".
+
+Note that ParseTime only heeds the fields year, month, day, hour,
+minute, (fractional) second, and UTC offset. Other fields, like
+weekday (%a or %A), while parsed for syntactic validity, are
+ignored in the conversion.
+
+Date and time fields that are out-of-range will be treated as
+errors rather than normalizing them like `absl::CivilSecond` does.
+For example, it is an error to parse the date "Oct 32, 2013"
+because 32 is out of range.
+
+A leap second of ":60" is normalized to ":00" of the following
+minute with fractional seconds discarded. The following table
+shows how the given seconds and subseconds will be parsed:
+
+ "59.x" -> 59.x // exact
+ "60.x" -> 00.0 // normalized
+ "00.x" -> 00.x // exact
+
+#### Args:
+
+
+* `time_string`: The input time string to be parsed.
+* `time_format`: The time format.
+* `output_unit`: The output unit of the parsed unix time. Can only be SECOND,
+ MILLISECOND, MICROSECOND, NANOSECOND.
+
+
+#### Returns:
+
+the number of seconds / milliseconds / microseconds / nanoseconds elapsed
+ since January 1, 1970 UTC.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If `output_unit` is not a valid value,
+ if parsing `time_string` according to `time_format` failed.
+
diff --git a/docs/api_docs/python/tfa/text/parse_time_op.md b/docs/api_docs/python/tfa/text/parse_time_op.md
new file mode 100644
index 0000000000..70cadfd726
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/parse_time_op.md
@@ -0,0 +1,29 @@
+
+
+
+
+
+# Module: tfa.text.parse_time_op
+
+
+
+
+
+
+Parse time ops.
+
+
+
+## Functions
+
+[`parse_time(...)`](../../tfa/text/parse_time.md): Parse an input string according to the provided format string into a Unix time.
+
+
+
diff --git a/docs/api_docs/python/tfa/text/skip_gram_ops.md b/docs/api_docs/python/tfa/text/skip_gram_ops.md
new file mode 100644
index 0000000000..856a1f5f90
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/skip_gram_ops.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# Module: tfa.text.skip_gram_ops
+
+
+
+
+
+
+Skip-gram sampling ops from https://arxiv.org/abs/1301.3781.
+
+
+
+## Functions
+
+[`skip_gram_sample(...)`](../../tfa/text/skip_gram_sample.md): Generates skip-gram token and label paired Tensors from the input tensor.
+
+[`skip_gram_sample_with_text_vocab(...)`](../../tfa/text/skip_gram_sample_with_text_vocab.md): Skip-gram sampling with a text vocabulary file.
+
+
+
diff --git a/docs/api_docs/python/tfa/text/skip_gram_sample.md b/docs/api_docs/python/tfa/text/skip_gram_sample.md
new file mode 100644
index 0000000000..d612bf524e
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/skip_gram_sample.md
@@ -0,0 +1,153 @@
+
+
+
+
+
+# tfa.text.skip_gram_sample
+
+
+
+
+
+
+
+
+Generates skip-gram token and label paired Tensors from the input
+
+**Aliases**: `tfa.text.skip_gram_ops.skip_gram_sample`
+
+``` python
+tfa.text.skip_gram_sample(
+ input_tensor,
+ min_skips=1,
+ max_skips=5,
+ start=0,
+ limit=-1,
+ emit_self_as_target=False,
+ vocab_freq_table=None,
+ vocab_min_count=None,
+ vocab_subsampling=None,
+ corpus_size=None,
+ batch_size=None,
+ batch_capacity=None,
+ seed=None,
+ name=None
+)
+```
+
+
+
+
+tensor.
+
+Generates skip-gram `("token", "label")` pairs using each element in the
+rank-1 `input_tensor` as a token. The window size used for each token will
+be randomly selected from the range specified by `[min_skips, max_skips]`,
+inclusive. See https://arxiv.org/abs/1301.3781 for more details about
+skip-gram.
+
+For example, given `input_tensor = ["the", "quick", "brown", "fox",
+"jumps"]`, `min_skips = 1`, `max_skips = 2`, `emit_self_as_target = False`,
+the output `(tokens, labels)` pairs for the token "quick" will be randomly
+selected from either `(tokens=["quick", "quick"], labels=["the", "brown"])`
+for 1 skip, or `(tokens=["quick", "quick", "quick"],
+labels=["the", "brown", "fox"])` for 2 skips.
+
+If `emit_self_as_target = True`, each token will also be emitted as a label
+for itself. From the previous example, the output will be either
+`(tokens=["quick", "quick", "quick"], labels=["the", "quick", "brown"])`
+for 1 skip, or `(tokens=["quick", "quick", "quick", "quick"],
+labels=["the", "quick", "brown", "fox"])` for 2 skips.
+
+The same process is repeated for each element of `input_tensor` and
+concatenated together into the two output rank-1 `Tensors` (one for all the
+tokens, another for all the labels).
+
+If `vocab_freq_table` is specified, tokens in `input_tensor` that are not
+present in the vocabulary are discarded. Tokens whose frequency counts are
+below `vocab_min_count` are also discarded. Tokens whose frequency
+proportions in the corpus exceed `vocab_subsampling` may be randomly
+down-sampled. See Eq. 5 in http://arxiv.org/abs/1310.4546 for more details
+about subsampling.
+
+Due to the random window sizes used for each token, the lengths of the
+outputs are non-deterministic, unless `batch_size` is specified to batch
+the outputs to always return `Tensors` of length `batch_size`.
+
+#### Args:
+
+
+* `input_tensor`: A rank-1 `Tensor` from which to generate skip-gram
+ candidates.
+* `min_skips`: `int` or scalar `Tensor` specifying the minimum window size to
+ randomly use for each token. Must be >= 0 and <= `max_skips`. If
+ `min_skips` and `max_skips` are both 0, the only label outputted will
+ be the token itself when `emit_self_as_target = True` -
+ or no output otherwise.
+* `max_skips`: `int` or scalar `Tensor` specifying the maximum window size to
+ randomly use for each token. Must be >= 0.
+* `start`: `int` or scalar `Tensor` specifying the position in
+ `input_tensor` from which to start generating skip-gram candidates.
+* `limit`: `int` or scalar `Tensor` specifying the maximum number of
+ elements in `input_tensor` to use in generating skip-gram candidates.
+ -1 means to use the rest of the `Tensor` after `start`.
+* `emit_self_as_target`: `bool` or scalar `Tensor` specifying whether to emit
+ each token as a label for itself.
+* `vocab_freq_table`: (Optional) A lookup table (subclass of
+ `lookup.InitializableLookupTableBase`) that maps tokens to their raw
+ frequency counts. If specified, any token in `input_tensor` that is not
+ found in `vocab_freq_table` will be filtered out before generating
+ skip-gram candidates. While this will typically map to integer raw
+ frequency counts, it could also map to float frequency proportions.
+ `vocab_min_count` and `corpus_size` should be in the same units
+ as this.
+* `vocab_min_count`: (Optional) `int`, `float`, or scalar `Tensor` specifying
+ minimum frequency threshold (from `vocab_freq_table`) for a token to be
+ kept in `input_tensor`. If this is specified, `vocab_freq_table` must
+ also be specified - and they should both be in the same units.
+* `vocab_subsampling`: (Optional) `float` specifying frequency proportion
+ threshold for tokens from `input_tensor`. Tokens that occur more
+ frequently (based on the ratio of the token's `vocab_freq_table` value
+ to the `corpus_size`) will be randomly down-sampled. Reasonable
+ starting values may be around 1e-3 or 1e-5. If this is specified, both
+ `vocab_freq_table` and `corpus_size` must also be specified. See Eq. 5
+ in http://arxiv.org/abs/1310.4546 for more details.
+* `corpus_size`: (Optional) `int`, `float`, or scalar `Tensor` specifying the
+ total number of tokens in the corpus (e.g., sum of all the frequency
+ counts of `vocab_freq_table`). Used with `vocab_subsampling` for
+ down-sampling frequently occurring tokens. If this is specified,
+ `vocab_freq_table` and `vocab_subsampling` must also be specified.
+* `batch_size`: (Optional) `int` specifying batch size of returned `Tensors`.
+* `batch_capacity`: (Optional) `int` specifying batch capacity for the queue
+ used for batching returned `Tensors`. Only has an effect if
+ `batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
+* `seed`: (Optional) `int` used to create a random seed for window size and
+ subsampling. See `set_random_seed` docs for behavior.
+* `name`: (Optional) A `string` name or a name scope for the operations.
+
+
+#### Returns:
+
+A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
+rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
+length `batch_size`; if `batch_size` is not specified, they will be of
+random length, though they will be in sync with each other as long as
+they are evaluated together.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If `vocab_freq_table` is not provided, but `vocab_min_count`,
+ `vocab_subsampling`, or `corpus_size` is specified.
+ If `vocab_subsampling` and `corpus_size` are not both present or
+ both absent.
+
diff --git a/docs/api_docs/python/tfa/text/skip_gram_sample_with_text_vocab.md b/docs/api_docs/python/tfa/text/skip_gram_sample_with_text_vocab.md
new file mode 100644
index 0000000000..9103ad45d0
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/skip_gram_sample_with_text_vocab.md
@@ -0,0 +1,148 @@
+
+
+
+
+
+# tfa.text.skip_gram_sample_with_text_vocab
+
+
+
+
+
+
+
+
+Skip-gram sampling with a text vocabulary file.
+
+**Aliases**: `tfa.text.skip_gram_ops.skip_gram_sample_with_text_vocab`
+
+``` python
+tfa.text.skip_gram_sample_with_text_vocab(
+ input_tensor,
+ vocab_freq_file,
+ vocab_token_index=0,
+ vocab_token_dtype=tf.dtypes.string,
+ vocab_freq_index=1,
+ vocab_freq_dtype=tf.dtypes.float64,
+ vocab_delimiter=',',
+ vocab_min_count=0,
+ vocab_subsampling=None,
+ corpus_size=None,
+ min_skips=1,
+ max_skips=5,
+ start=0,
+ limit=-1,
+ emit_self_as_target=False,
+ batch_size=None,
+ batch_capacity=None,
+ seed=None,
+ name=None
+)
+```
+
+
+
+
+
+Wrapper around `skip_gram_sample()` for use with a text vocabulary file.
+The vocabulary file is expected to be a plain-text file, with lines of
+`vocab_delimiter`-separated columns. The `vocab_token_index` column should
+contain the vocabulary term, while the `vocab_freq_index` column should
+contain the number of times that term occurs in the corpus. For example,
+with a text vocabulary file of:
+
+ ```
+ bonjour,fr,42
+ hello,en,777
+ hola,es,99
+ ```
+
+You should set `vocab_delimiter=","`, `vocab_token_index=0`, and
+`vocab_freq_index=2`.
+
+See `skip_gram_sample()` documentation for more details about the skip-gram
+sampling process.
+
+#### Args:
+
+
+* `input_tensor`: A rank-1 `Tensor` from which to generate skip-gram candidates.
+* `vocab_freq_file`: `string` specifying full file path to the text vocab file.
+* `vocab_token_index`: `int` specifying which column in the text vocab file
+ contains the tokens.
+* `vocab_token_dtype`: `DType` specifying the format of the tokens in the text vocab file.
+* `vocab_freq_index`: `int` specifying which column in the text vocab file
+ contains the frequency counts of the tokens.
+* `vocab_freq_dtype`: `DType` specifying the format of the frequency counts
+ in the text vocab file.
+* `vocab_delimiter`: `string` specifying the delimiter used in the text vocab
+ file.
+* `vocab_min_count`: `int`, `float`, or scalar `Tensor` specifying
+ minimum frequency threshold (from `vocab_freq_file`) for a token to be
+ kept in `input_tensor`. This should correspond with `vocab_freq_dtype`.
+* `vocab_subsampling`: (Optional) `float` specifying frequency proportion
+ threshold for tokens from `input_tensor`. Tokens that occur more
+ frequently will be randomly down-sampled. Reasonable starting values
+ may be around 1e-3 or 1e-5. See Eq. 5 in http://arxiv.org/abs/1310.4546
+ for more details.
+* `corpus_size`: (Optional) `int`, `float`, or scalar `Tensor` specifying the
+ total number of tokens in the corpus (e.g., sum of all the frequency
+ counts of `vocab_freq_file`). Used with `vocab_subsampling` for
+ down-sampling frequently occurring tokens. If this is specified,
+ `vocab_freq_file` and `vocab_subsampling` must also be specified.
+ If `corpus_size` is needed but not supplied, then it will be calculated
+ from `vocab_freq_file`. You might want to supply your own value if you
+ have already eliminated infrequent tokens from your vocabulary files
+ (where frequency < vocab_min_count) to save memory in the internal
+ token lookup table. Otherwise, the unused tokens' variables will waste
+ memory. The user-supplied `corpus_size` value must be greater than or
+ equal to the sum of all the frequency counts of `vocab_freq_file`.
+* `min_skips`: `int` or scalar `Tensor` specifying the minimum window size to
+ randomly use for each token. Must be >= 0 and <= `max_skips`. If
+ `min_skips` and `max_skips` are both 0, the only label outputted will
+ be the token itself.
+* `max_skips`: `int` or scalar `Tensor` specifying the maximum window size to
+ randomly use for each token. Must be >= 0.
+* `start`: `int` or scalar `Tensor` specifying the position in `input_tensor`
+ from which to start generating skip-gram candidates.
+* `limit`: `int` or scalar `Tensor` specifying the maximum number of elements
+ in `input_tensor` to use in generating skip-gram candidates. -1 means
+ to use the rest of the `Tensor` after `start`.
+* `emit_self_as_target`: `bool` or scalar `Tensor` specifying whether to emit
+ each token as a label for itself.
+* `batch_size`: (Optional) `int` specifying batch size of returned `Tensors`.
+* `batch_capacity`: (Optional) `int` specifying batch capacity for the queue
+ used for batching returned `Tensors`. Only has an effect if
+ `batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
+* `seed`: (Optional) `int` used to create a random seed for window size and
+ subsampling. See
+ the `tf.random.set_seed` documentation
+ for behavior.
+* `name`: (Optional) A `string` name or a name scope for the operations.
+
+
+#### Returns:
+
+A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
+rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
+length `batch_size`; if `batch_size` is not specified, they will be of
+random length, though they will be in sync with each other as long as
+they are evaluated together.
+
+
+
+#### Raises:
+
+
+* `ValueError`: If `vocab_token_index` or `vocab_freq_index` is less than 0
+ or exceeds the number of columns in `vocab_freq_file`.
+ If `vocab_token_index` and `vocab_freq_index` are both set to the same
+ column. If any token in `vocab_freq_file` has a negative frequency.
+
diff --git a/docs/api_docs/python/tfa/text/viterbi_decode.md b/docs/api_docs/python/tfa/text/viterbi_decode.md
new file mode 100644
index 0000000000..9896db72d0
--- /dev/null
+++ b/docs/api_docs/python/tfa/text/viterbi_decode.md
@@ -0,0 +1,52 @@
+
+
+
+
+
+# tfa.text.viterbi_decode
+
+
+
+
+
+
+
+
+Decode the highest scoring sequence of tags outside of TensorFlow.
+
+**Aliases**: `tfa.text.crf.viterbi_decode`
+
+``` python
+tfa.text.viterbi_decode(
+ score,
+ transition_params
+)
+```
+
+
+
+
+
+This should only be used at test time.
+
+#### Args:
+
+
+* `score`: A [seq_len, num_tags] matrix of unary potentials.
+* `transition_params`: A [num_tags, num_tags] matrix of binary potentials.
+
+
+#### Returns:
+
+
+* `viterbi`: A [seq_len] list of integers containing the highest scoring tag
+ indices.
+* `viterbi_score`: A float containing the score for the Viterbi sequence.
+
diff --git a/requirements.txt b/requirements.txt
index c37c03f8cb..6452e8905b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-tf-nightly
\ No newline at end of file
+tensorflow>=2.1.0
\ No newline at end of file
diff --git a/tensorflow_addons/version.py b/tensorflow_addons/version.py
index 1ae4ed7ba3..c4eac20093 100644
--- a/tensorflow_addons/version.py
+++ b/tensorflow_addons/version.py
@@ -27,7 +27,7 @@
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
-_VERSION_SUFFIX = 'dev'
+_VERSION_SUFFIX = ''
# Example, '0.1.0-dev'
__version__ = '.'.join([
diff --git a/tools/ci_build/builds/release_linux.sh b/tools/ci_build/builds/release_linux.sh
index c02e674432..8ad2e41184 100755
--- a/tools/ci_build/builds/release_linux.sh
+++ b/tools/ci_build/builds/release_linux.sh
@@ -54,10 +54,10 @@ for version in ${PYTHON_VERSIONS}; do
build_pip_pkg
# Package Whl
- bazel-bin/build_pip_pkg artifacts --nightly
+ # bazel-bin/build_pip_pkg artifacts --nightly
# Uncomment and use this command for release branches
- #bazel-bin/build_pip_pkg artifacts
+ bazel-bin/build_pip_pkg artifacts
done
# Clean up
diff --git a/tools/ci_build/builds/release_macos.sh b/tools/ci_build/builds/release_macos.sh
index e683a89a42..16abef246f 100755
--- a/tools/ci_build/builds/release_macos.sh
+++ b/tools/ci_build/builds/release_macos.sh
@@ -53,10 +53,10 @@ for version in ${PYTHON_VERSIONS}; do
build_pip_pkg
# Package Whl
- bazel-bin/build_pip_pkg artifacts --nightly
+ # bazel-bin/build_pip_pkg artifacts --nightly
# Uncomment and use this command for release branches
- #bazel-bin/build_pip_pkg artifacts
+ bazel-bin/build_pip_pkg artifacts
done
# Clean up
diff --git a/tools/ci_build/builds/release_windows.sh b/tools/ci_build/builds/release_windows.sh
index a53fa5e4ad..91bf26f7ba 100644
--- a/tools/ci_build/builds/release_windows.sh
+++ b/tools/ci_build/builds/release_windows.sh
@@ -39,4 +39,4 @@ echo 'y' | ./configure.sh --quiet
--test_output=errors \
build_pip_pkg
-bazel-bin/build_pip_pkg artifacts --nightly
+bazel-bin/build_pip_pkg artifacts