From e6907a1754d2d8e9fba2280e11ff17154ddd7930 Mon Sep 17 00:00:00 2001
From: juanantoniomc
Date: Sat, 14 Oct 2023 01:20:44 -0700
Subject: [PATCH] Remove old training_ops target and redirect references to the new target.

PiperOrigin-RevId: 573417544
---
 tensorflow_probability/python/optimizer/sgld.py | 9 ++++-----
 .../python/optimizer/variational_sgd.py         | 9 ++++-----
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/tensorflow_probability/python/optimizer/sgld.py b/tensorflow_probability/python/optimizer/sgld.py
index e40c6353aa..d51cd29574 100644
--- a/tensorflow_probability/python/optimizer/sgld.py
+++ b/tensorflow_probability/python/optimizer/sgld.py
@@ -20,7 +20,6 @@
 from tensorflow_probability.python.internal import distribution_util
 from tensorflow_probability.python.internal import dtype_util
 from tensorflow_probability.python.math.diag_jacobian import diag_jacobian
-from tensorflow.python.training import training_ops
 
 
 __all__ = [
@@ -235,10 +234,10 @@ def _prepare(self, var_list):
   def _resource_apply_dense(self, grad, var):
     rms = self.get_slot(var, 'rms')
     new_grad = self._apply_noisy_update(rms, grad, var)
-    return training_ops.resource_apply_gradient_descent(
-        var.handle,
-        tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
-        new_grad,
+    return tf.raw_ops.ResourceApplyGradientDescent(
+        var=var.handle,
+        alpha=tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
+        delta=new_grad,
         use_locking=self._use_locking)
 
   def _resource_apply_sparse(self, grad, var, indices):
diff --git a/tensorflow_probability/python/optimizer/variational_sgd.py b/tensorflow_probability/python/optimizer/variational_sgd.py
index 635d6b6f5b..40285776d6 100644
--- a/tensorflow_probability/python/optimizer/variational_sgd.py
+++ b/tensorflow_probability/python/optimizer/variational_sgd.py
@@ -19,7 +19,6 @@
 from tensorflow_probability.python.internal import assert_util
 from tensorflow_probability.python.internal import distribution_util
 from tensorflow_probability.python.internal import dtype_util
-from tensorflow.python.training import training_ops
 
 
 __all__ = [
@@ -236,10 +235,10 @@ def _resource_apply_dense(self, grad, var):
         tf.cast(max_learning_rate, var.dtype.base_dtype))
 
     newgrad = grad * learn_rates
-    return training_ops.resource_apply_gradient_descent(
-        var.handle,
-        tf.cast(1., var.dtype),
-        newgrad,
+    return tf.raw_ops.ResourceApplyGradientDescent(
+        var=var.handle,
+        alpha=tf.cast(1., var.dtype),
+        delta=newgrad,
         use_locking=self._use_locking)
 
   def _resource_apply_sparse(self, grad, var, indices):
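
Note (not part of the patch): a minimal sketch of why the new call is a drop-in replacement. tf.raw_ops.ResourceApplyGradientDescent is the public endpoint for the op that the removed private training_ops.resource_apply_gradient_descent wrapper invoked; it updates a resource variable in place as var -= alpha * delta, with the old positional arguments mapping onto the keyword names var, alpha, and delta used above. The snippet assumes an eager TF 2.x runtime, and its variable names and values are illustrative only, not taken from the patched optimizers.

    import tensorflow as tf

    var = tf.Variable([1.0, 2.0, 3.0])    # resource variable, stands in for an optimizer slot/weight
    alpha = tf.constant(0.1)              # step size; the optimizers pass a cast learning rate (or 1.)
    delta = tf.constant([0.5, 0.5, 0.5])  # update direction; the optimizers pass the (noisy/scaled) gradient

    # Same keyword-argument call shape as in the patched _resource_apply_dense methods.
    tf.raw_ops.ResourceApplyGradientDescent(
        var=var.handle, alpha=alpha, delta=delta, use_locking=False)

    print(var.numpy())  # [0.95 1.95 2.95], i.e. the original var minus alpha * delta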