remove most tf dependencies
make the code compatible with pytorch

make it more keras only, working also with sum rules

update keras limits

test with newer tf

fix for normalized distributions
scarlehoff committed Nov 27, 2024
1 parent 2cf7312 commit 34c5a3b
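The pytorch compatibility mentioned in the commit message comes from Keras 3's pluggable backends: the same `keras.ops` calls run on tensorflow or torch. A minimal sketch of selecting the backend (assumes a Keras 3 installation; not part of this commit):

    import os

    # must be set before keras is imported; "tensorflow" remains the default backend
    os.environ["KERAS_BACKEND"] = "torch"

    import keras

    print(keras.backend.backend())  # -> "torch"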
Showing 18 changed files with 236 additions and 182 deletions.
3 changes: 2 additions & 1 deletion conda-recipe/meta.yaml
@@ -19,7 +19,8 @@ requirements:
     - pip
   run:
     - python >=3.9,<3.13
-    - tensorflow >=2.10,<2.17  # 2.17 works ok but the conda-forge package for macos doesn't
+    - tensorflow >=2.10
+    - keras >=3.1
     - psutil  # to ensure n3fit affinity is with the right processors
     - hyperopt
     - mongodb
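With the TF upper pin dropped, compatibility is now controlled through `keras >=3.1` instead. A quick runtime check of an installed environment (illustrative only, not part of the recipe):

    import keras
    import tensorflow as tf

    # the updated recipe requires keras >= 3.1 and tensorflow >= 2.10
    print("keras:", keras.version())
    print("tensorflow:", tf.__version__)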
28 changes: 10 additions & 18 deletions n3fit/src/n3fit/backends/keras_backend/MetaModel.py
@@ -8,23 +8,15 @@
 from pathlib import Path
 import re
 
+from keras import backend as K
+from keras import ops as Kops
+from keras import optimizers as Kopt
+from keras.models import Model
 import numpy as np
-import tensorflow as tf
-from tensorflow.keras import optimizers as Kopt
-from tensorflow.keras.models import Model
-from tensorflow.python.keras.utils import tf_utils  # pylint: disable=no-name-in-module
 
 import n3fit.backends.keras_backend.operations as op
 
-# We need a function to transform tensors to numpy/python primitives
-# which is not part of the official TF interface and can change with the version
-if hasattr(tf_utils, "to_numpy_or_python_type"):
-    _to_numpy_or_python_type = tf_utils.to_numpy_or_python_type
-elif hasattr(tf_utils, "sync_to_numpy_or_python_type"):  # from TF 2.5
-    _to_numpy_or_python_type = tf_utils.sync_to_numpy_or_python_type
-else:  # in case of disaster
-    _to_numpy_or_python_type = lambda ret: {k: i.numpy() for k, i in ret.items()}

 # Starting with TF 2.16, a memory leak in TF https://github.com/tensorflow/tensorflow/issues/64170
 # makes jit compilation unusable in GPU.
 # Before TF 2.16 it was set to `False` by default. From 2.16 onwards, it is set to `True`
@@ -121,7 +113,7 @@ def __init__(self, input_tensors, output_tensors, scaler=None, input_values=None
         self.compute_losses_function = None
         self._scaler = scaler

-    @tf.autograph.experimental.do_not_convert
+    # @tf.autograph.experimental.do_not_convert
     def _parse_input(self, extra_input=None):
         """Returns the input data the model was compiled with.
         Introduces the extra_input in the places assigned to the placeholders.
@@ -173,8 +165,8 @@ def perform_fit(self, x=None, y=None, epochs=1, **kwargs):
         steps_per_epoch = self._determine_steps_per_epoch(epochs)
 
         for k, v in x_params.items():
-            x_params[k] = tf.repeat(v, steps_per_epoch, axis=0)
-        y = [tf.repeat(yi, steps_per_epoch, axis=0) for yi in y]
+            x_params[k] = Kops.repeat(v, steps_per_epoch, axis=0)
+        y = [Kops.repeat(yi, steps_per_epoch, axis=0) for yi in y]
 
         history = super().fit(
             x=x_params, y=y, epochs=epochs // steps_per_epoch, batch_size=1, **kwargs
@@ -228,13 +220,13 @@ def compute_losses(self):
             inputs[k] = v[:1]
 
         # Compile an evaluation function
-        @tf.function
+        @op.decorator_compiler
         def losses_fun():
             predictions = self(inputs)
             # If we only have one dataset the output changes
             if len(out_names) == 2:
                 predictions = [predictions]
-            total_loss = tf.reduce_sum(predictions, axis=0)
+            total_loss = Kops.sum(predictions, axis=0)
             ret = [total_loss] + predictions
             return dict(zip(out_names, ret))

@@ -244,7 +236,7 @@ def losses_fun():

         # The output of this function is to be used by python (and numpy)
         # so we need to convert the tensors
-        return _to_numpy_or_python_type(ret)
+        return op.dict_to_numpy_or_python(ret)

     def compile(
         self,
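The removed `tf_utils` shim is replaced by `op.dict_to_numpy_or_python`, defined in `operations.py` (not shown in this diff). A backend-agnostic sketch of what such a helper can look like:

    import numpy as np
    from keras import ops as Kops

    def dict_to_numpy_or_python(ret):
        """Convert a dict of backend tensors to numpy arrays or python scalars (sketch)."""
        out = {}
        for key, tensor in ret.items():
            value = np.asarray(Kops.convert_to_numpy(tensor))
            # unwrap 0-d arrays into plain python numbers
            out[key] = value.item() if value.ndim == 0 else value
        return out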
22 changes: 10 additions & 12 deletions n3fit/src/n3fit/backends/keras_backend/base_layers.py
@@ -17,16 +17,14 @@
 The names of the layer and the activation function are the ones to be used in the n3fit runcard.
 """
 
-from tensorflow import expand_dims, math, nn
-from tensorflow.keras.layers import Dense as KerasDense
-from tensorflow.keras.layers import Dropout, Lambda
-from tensorflow.keras.layers import Input  # pylint: disable=unused-import
-from tensorflow.keras.layers import LSTM, Concatenate
-from tensorflow.keras.regularizers import l1_l2
+from keras.layers import Dense as KerasDense
+from keras.layers import Dropout, Lambda
+from keras.layers import Input  # pylint: disable=unused-import
+from keras.layers import LSTM, Concatenate
+from keras.regularizers import l1_l2
 
+from . import operations as ops
 from .MetaLayer import MetaLayer
-from .operations import concatenate_function
 
-
 # Custom activation functions
 def square_activation(x):
@@ -38,17 +36,17 @@ def square_singlet(x):
     """Square the singlet sector
     Defined as the two first values of the NN"""
     singlet_squared = x[..., :2] ** 2
-    return concatenate_function([singlet_squared, x[..., 2:]], axis=-1)
+    return ops.concatenate([singlet_squared, x[..., 2:]], axis=-1)
 
 
 def modified_tanh(x):
     """A non-saturating version of the tanh function"""
-    return math.abs(x) * nn.tanh(x)
+    return ops.absolute(x) * ops.tanh(x)
 
 
 def leaky_relu(x):
     """Computes the Leaky ReLU activation function"""
-    return nn.leaky_relu(x, alpha=0.2)
+    return ops.leaky_relu(x, alpha=0.2)
 
 
 custom_activations = {
@@ -64,7 +62,7 @@ def LSTM_modified(**kwargs):
     LSTM asks for a sample X timestep X features kind of thing so we need to reshape the input
     """
     the_lstm = LSTM(**kwargs)
-    ExpandDim = Lambda(lambda x: expand_dims(x, axis=-1))
+    ExpandDim = Lambda(lambda x: ops.expand_dims(x, axis=-1))
 
     def ReshapedLSTM(input_tensor):
         if len(input_tensor.shape) == 2:
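Since the custom activations now go through `keras.ops`, they are backend-agnostic. A small sanity check (assumes Keras 3; not part of the commit):

    import numpy as np
    from keras import ops

    def modified_tanh(x):
        # non-saturating tanh, as defined above
        return ops.absolute(x) * ops.tanh(x)

    # keras.ops accepts numpy input and returns a tensor of the active backend
    x = np.linspace(-3.0, 3.0, 5)
    print(ops.convert_to_numpy(modified_tanh(x)))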
3 changes: 1 addition & 2 deletions n3fit/src/n3fit/backends/keras_backend/callbacks.py
@@ -15,9 +15,9 @@
 import logging
 from time import time
 
+from keras.callbacks import Callback, TensorBoard
 import numpy as np
 import tensorflow as tf
-from tensorflow.keras.callbacks import Callback, TensorBoard
 
 log = logging.getLogger(__name__)
 
@@ -171,7 +171,6 @@ def on_train_begin(self, logs=None):
             layer = self.model.get_layer(layer_name)
             self.updateable_weights.append(layer.weights)
 
-    @tf.function
     def _update_weights(self):
         """Update all the weights with the corresponding multipliers
         Wrapped with tf.function to compensate the for loops as both weights variables
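The dropped `@tf.function` here, and the `@op.decorator_compiler` used in MetaModel.py, suggest compilation is delegated to the operations module. A hedged sketch of such a backend-dispatching decorator (`decorator_compiler` is not shown in this diff):

    from keras import backend

    def decorator_compiler(func):
        """Compile func with the tool of the active Keras backend (sketch)."""
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            return tf.function(func)
        if backend.backend() == "torch":
            import torch

            return torch.compile(func)
        # other backends: fall back to the uncompiled function
        return func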
10 changes: 5 additions & 5 deletions n3fit/src/n3fit/backends/keras_backend/constraints.py
@@ -2,9 +2,9 @@
 Implementations of weight constraints for initializers
 """
 
-import tensorflow as tf
-from tensorflow.keras import backend as K
-from tensorflow.keras.constraints import MinMaxNorm
+from keras import backend as K
+from keras import ops as Kops
+from keras.constraints import MinMaxNorm
 
 
 class MinMaxWeight(MinMaxNorm):
@@ -17,8 +17,8 @@ def __init__(self, min_value, max_value, **kwargs):
         super().__init__(min_value=min_value, max_value=max_value, axis=1, **kwargs)
 
     def __call__(self, w):
-        norms = K.sum(w, axis=self.axis, keepdims=True)
+        norms = Kops.sum(w, axis=self.axis, keepdims=True)
         desired = (
-            self.rate * K.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms
+            self.rate * Kops.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms
         )
         return w * desired / (K.epsilon() + norms)
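`MinMaxWeight` constrains the sum of weights along an axis rather than their norm. A usage sketch attaching it to a layer (illustrative values):

    from keras.layers import Dense

    # keep each input row of the kernel summing to a value in [0.8, 1.2]
    layer = Dense(8, kernel_constraint=MinMaxWeight(min_value=0.8, max_value=1.2))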
5 changes: 3 additions & 2 deletions n3fit/src/n3fit/backends/keras_backend/internal_state.py
@@ -1,6 +1,7 @@
 """
     Library of functions that modify the internal state of Keras/Tensorflow
 """
+
 import os
 
 import psutil
@@ -13,10 +14,10 @@
 import logging
 import random as rn
 
+import keras
+from keras import backend as K
 import numpy as np
 import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import backend as K
 
 log = logging.getLogger(__name__)
 
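With a plain `import keras`, the RNG state touched by this module can be seeded backend-agnostically; a minimal sketch using a stock Keras 3 utility (not necessarily what internal_state.py does):

    import keras

    # seeds python's random module, numpy and the active backend in one call
    keras.utils.set_random_seed(42)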
(Diffs for the remaining 13 changed files were not rendered.)
