Skip to content

Commit

Permalink
Remove old training_ops target and redirect references to the new target.
Browse files Browse the repository at this point in the history

PiperOrigin-RevId: 573417544
  • Loading branch information
jam14j authored and tensorflower-gardener committed Oct 14, 2023
1 parent 53c4dc6 commit e6907a1
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 10 deletions.
9 changes: 4 additions & 5 deletions tensorflow_probability/python/optimizer/sgld.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.math.diag_jacobian import diag_jacobian
from tensorflow.python.training import training_ops


__all__ = [
Expand Down Expand Up @@ -235,10 +234,10 @@ def _prepare(self, var_list):
def _resource_apply_dense(self, grad, var):
    """Applies a dense gradient update to `var`.

    The raw gradient is first perturbed by `_apply_noisy_update` using the
    per-variable 'rms' preconditioner slot (the SGLD noise step), then applied
    as a plain gradient-descent update via the public raw op. This replaces
    the deleted private `training_ops.resource_apply_gradient_descent` helper
    with the equivalent `tf.raw_ops.ResourceApplyGradientDescent`.

    Args:
      grad: A `Tensor` gradient for `var`.
      var: A `tf.Variable` (resource variable) to update in place.

    Returns:
      The op returned by `tf.raw_ops.ResourceApplyGradientDescent`, which
      updates `var` as `var -= alpha * delta`.
    """
    rms = self.get_slot(var, 'rms')
    # Inject preconditioned Langevin noise into the gradient before the
    # descent step; `_apply_noisy_update` is defined elsewhere in this class.
    new_grad = self._apply_noisy_update(rms, grad, var)
    return tf.raw_ops.ResourceApplyGradientDescent(
        var=var.handle,
        # Cast so the step size matches the variable's base dtype.
        alpha=tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        delta=new_grad,
        use_locking=self._use_locking)

def _resource_apply_sparse(self, grad, var, indices):
Expand Down
9 changes: 4 additions & 5 deletions tensorflow_probability/python/optimizer/variational_sgd.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow.python.training import training_ops


__all__ = [
Expand Down Expand Up @@ -236,10 +235,10 @@ def _resource_apply_dense(self, grad, var):
tf.cast(max_learning_rate, var.dtype.base_dtype))

newgrad = grad * learn_rates
return training_ops.resource_apply_gradient_descent(
var.handle,
tf.cast(1., var.dtype),
newgrad,
return tf.raw_ops.ResourceApplyGradientDescent(
var=var.handle,
alpha=tf.cast(1., var.dtype),
delta=newgrad,
use_locking=self._use_locking)

def _resource_apply_sparse(self, grad, var, indices):
Expand Down

0 comments on commit e6907a1

Please sign in to comment.