Lazy converter imports and migrate to pyproject.toml #1094

Status: Open. Wants to merge 13 commits into base: main.
12 changes: 6 additions & 6 deletions .pre-commit-config.yaml
@@ -9,13 +9,19 @@ repos:
args: ['--line-length=125',
'--skip-string-normalization']

- repo: https://github.com/tox-dev/pyproject-fmt
rev: v2.5.0
hooks:
- id: pyproject-fmt

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: end-of-file-fixer
@@ -27,19 +33,13 @@ repos:
rev: 5.13.2
hooks:
- id: isort
args: ["--profile", "black", --line-length=125]

- repo: https://github.com/asottile/pyupgrade
rev: v3.19.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]

- repo: https://github.com/asottile/setup-cfg-fmt
rev: v2.7.0
hooks:
- id: setup-cfg-fmt

- repo: https://github.com/pycqa/flake8
rev: 7.1.1
hooks:
5 changes: 3 additions & 2 deletions MANIFEST.in
@@ -1,7 +1,8 @@
include LICENSE README.md CONTRIBUTING.md CITATION.cff pyproject.toml setup.py setup.cfg .clang-format
include LICENSE README.md CONTRIBUTING.md CITATION.cff pyproject.toml .clang-format
graft example-models
graft test
graft contrib
recursive-include hls4ml/templates *
global-exclude .git .gitmodules .gitlab-ci.yml
recursive-include hls4ml *.py
global-exclude .git .gitmodules .gitlab-ci.yml *.pyc
include hls4ml/backends/vivado_accelerator/supported_boards.json
30 changes: 0 additions & 30 deletions hls4ml/__init__.py
@@ -1,33 +1,3 @@
# Temporary workaround for QKeras installation requirement, will be removed after 1.0.0
def maybe_install_qkeras():
import subprocess
import sys

QKERAS_PKG_NAME = 'QKeras'
# QKERAS_PKG_SOURCE = QKERAS_PKG_NAME
QKERAS_PKG_SOURCE = 'qkeras@git+https://github.com/fastmachinelearning/qkeras.git'

def pip_list():
p = subprocess.run([sys.executable, '-m', 'pip', 'list'], check=True, capture_output=True)
return p.stdout.decode()

def pip_install(package):
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

all_pkgs = pip_list()
if QKERAS_PKG_NAME not in all_pkgs:
print('QKeras installation not found, installing one...')
pip_install(QKERAS_PKG_SOURCE)
print('QKeras installed.')


try:
maybe_install_qkeras()
except Exception:
print('Could not find QKeras installation, make sure you have QKeras installed.')

# End of workaround

from hls4ml import converters, report, utils # noqa: F401, E402

try:
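
The auto-install workaround removed above is superseded by the @requires decorator imported from hls4ml.utils.dependency in the converter modules below. A minimal sketch of how such a decorator can work; the extras mapping and error messages here are illustrative assumptions, not the PR's actual implementation:

import functools
import importlib.util

# Hypothetical mapping: dependency key -> (module to probe, install hint).
_DEP_HINTS = {
    '_keras': ('keras', "pip install 'hls4ml[keras]'"),
    '_torch': ('torch', "pip install 'hls4ml[torch]'"),
    'onnx': ('onnx', "pip install 'hls4ml[onnx]'"),
    'sr': ('sympy', "pip install 'hls4ml[sr]'"),
}

def requires(dep):
    """Defer the dependency check until the decorated callable is invoked."""
    module_name, hint = _DEP_HINTS[dep]

    def decorator(obj):
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            # Probe without importing at decoration time, so module import stays cheap.
            if importlib.util.find_spec(module_name) is None:
                raise ImportError(f'{obj.__qualname__} requires {module_name!r}; try: {hint}')
            return obj(*args, **kwargs)

        return wrapper

    return decorator
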
File renamed without changes.
53 changes: 18 additions & 35 deletions hls4ml/converters/__init__.py
@@ -1,6 +1,5 @@
import importlib
import os
import warnings

import yaml

@@ -10,33 +9,19 @@
from hls4ml.converters.keras_to_hls import get_supported_keras_layers # noqa: F401
from hls4ml.converters.keras_to_hls import parse_keras_model # noqa: F401
from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler
from hls4ml.converters.onnx_to_hls import get_supported_onnx_layers # noqa: F401
from hls4ml.converters.onnx_to_hls import parse_onnx_model # noqa: F401
from hls4ml.converters.onnx_to_hls import onnx_to_hls, register_onnx_layer_handler
from hls4ml.converters.pytorch_to_hls import ( # noqa: F401
get_supported_pytorch_layers,
pytorch_to_hls,
register_pytorch_layer_handler,
)
from hls4ml.model import ModelGraph
from hls4ml.utils.config import create_config
from hls4ml.utils.dependency import requires
from hls4ml.utils.symbolic_utils import LUTFunction

# ----------Make converters available if the libraries can be imported----------#
try:
from hls4ml.converters.pytorch_to_hls import ( # noqa: F401
get_supported_pytorch_layers,
pytorch_to_hls,
register_pytorch_layer_handler,
)

__pytorch_enabled__ = True
except ImportError:
warnings.warn("WARNING: Pytorch converter is not enabled!", stacklevel=1)
__pytorch_enabled__ = False

try:
from hls4ml.converters.onnx_to_hls import get_supported_onnx_layers # noqa: F401
from hls4ml.converters.onnx_to_hls import onnx_to_hls, register_onnx_layer_handler

__onnx_enabled__ = True
except ImportError:
warnings.warn("WARNING: ONNX converter is not enabled!", stacklevel=1)
__onnx_enabled__ = False

# ----------Layer handling register----------#
model_types = ['keras', 'pytorch', 'onnx']

@@ -51,7 +36,7 @@
# and has 'handles' attribute
# and is defined in this module (i.e., not imported)
if callable(func) and hasattr(func, 'handles') and func.__module__ == lib.__name__:
for layer in func.handles:
for layer in func.handles: # type: ignore
if model_type == 'keras':
register_keras_layer_handler(layer, func)
elif model_type == 'pytorch':
@@ -93,10 +78,10 @@ def parse_yaml_config(config_file):
"""

def construct_keras_model(loader, node):
from tensorflow.keras.models import load_model

model_str = loader.construct_scalar(node)
return load_model(model_str)
import keras

return keras.models.load_model(model_str)

yaml.add_constructor('!keras_model', construct_keras_model, Loader=yaml.SafeLoader)

@@ -124,15 +109,9 @@ def convert_from_config(config):

model = None
if 'OnnxModel' in yamlConfig:
if __onnx_enabled__:
model = onnx_to_hls(yamlConfig)
else:
raise Exception("ONNX not found. Please install ONNX.")
model = onnx_to_hls(yamlConfig)
elif 'PytorchModel' in yamlConfig:
if __pytorch_enabled__:
model = pytorch_to_hls(yamlConfig)
else:
raise Exception("PyTorch not found. Please install PyTorch.")
model = pytorch_to_hls(yamlConfig)
else:
model = keras_to_hls(yamlConfig)

@@ -174,6 +153,7 @@ def _check_model_config(model_config):
return model_config


@requires('_keras')
def convert_from_keras_model(
model,
output_dir='my-hls-test',
@@ -237,6 +217,7 @@ def convert_from_keras_model(
return keras_to_hls(config)


@requires('_torch')
def convert_from_pytorch_model(
model,
output_dir='my-hls-test',
@@ -308,6 +289,7 @@ def convert_from_pytorch_model(
return pytorch_to_hls(config)


@requires('onnx')
def convert_from_onnx_model(
model,
output_dir='my-hls-test',
@@ -371,6 +353,7 @@ def convert_from_onnx_model(
return onnx_to_hls(config)


@requires('sr')
def convert_from_symbolic_expression(
expr,
n_symbols=None,
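
Net effect on the public API: importing hls4ml no longer drags in torch or onnx, and a missing optional dependency surfaces only when the corresponding converter is actually called. A usage sketch, assuming keras is installed and a trained Keras model is bound to the name model (the error text shown is illustrative):

import hls4ml  # no longer imports torch/onnx eagerly

# Keras conversion is unchanged:
config = hls4ml.utils.config_from_keras_model(model, granularity='model')
hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config)

# Converters for uninstalled backends now fail at call time, not import time:
try:
    hls4ml.converters.convert_from_onnx_model('model.onnx')
except ImportError as err:
    print(err)  # should point at the missing optional dependency
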
4 changes: 2 additions & 2 deletions hls4ml/converters/keras/qkeras.py
@@ -1,5 +1,3 @@
from qkeras.quantizers import get_quantizer

from hls4ml.converters.keras.convolution import parse_conv1d_layer, parse_conv2d_layer
from hls4ml.converters.keras.core import parse_batchnorm_layer, parse_dense_layer
from hls4ml.converters.keras.recurrent import parse_rnn_layer
@@ -88,6 +86,8 @@ def parse_qrnn_layer(keras_layer, input_names, input_shapes, data_reader):

@keras_handler('QActivation')
def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
from qkeras.quantizers import get_quantizer

assert keras_layer['class_name'] == 'QActivation'
supported_activations = [
'quantized_relu',
4 changes: 2 additions & 2 deletions hls4ml/converters/keras_to_hls.py
@@ -160,9 +160,9 @@ def get_model_arch(config):
# Model instance passed in config from API
keras_model = config['KerasModel']
if isinstance(keras_model, str):
from tensorflow.keras.models import load_model
import keras

keras_model = load_model(keras_model)
keras_model = keras.models.load_model(keras_model)
model_arch = json.loads(keras_model.to_json())
reader = KerasModelReader(keras_model)
elif 'KerasJson' in config:
13 changes: 10 additions & 3 deletions hls4ml/converters/onnx_to_hls.py
@@ -1,7 +1,5 @@
import onnx
from onnx import helper, numpy_helper

from hls4ml.model import ModelGraph
from hls4ml.utils.dependency import requires


# ----------------------Helpers---------------------
@@ -20,7 +18,10 @@ def replace_char_inconsitency(name):
return name.replace('.', '_')


@requires('onnx')
def get_onnx_attribute(operation, name, default=None):
from onnx import helper

attr = next((x for x in operation.attribute if x.name == name), None)
if attr is None:
value = default
@@ -74,8 +75,11 @@ def get_input_shape(graph, node):
return rv


@requires('onnx')
def get_constant_value(graph, constant_name):
tensor = next((x for x in graph.initializer if x.name == constant_name), None)
from onnx import numpy_helper

return numpy_helper.to_array(tensor)


@@ -257,6 +261,7 @@ def parse_onnx_model(onnx_model):
return layer_list, input_layers, output_layers


@requires('onnx')
def onnx_to_hls(config):
"""Convert onnx model to hls model from configuration.

@@ -273,6 +278,8 @@
# Extract model architecture
print('Interpreting Model ...')

import onnx

onnx_model = onnx.load(config['OnnxModel']) if isinstance(config['OnnxModel'], str) else config['OnnxModel']

layer_list, input_layers, output_layers = parse_onnx_model(onnx_model)
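
The pattern in this file recurs throughout the PR: module-scope imports of heavy optional packages move into the functions that need them, with @requires supplying an actionable error when the package is absent. Schematically (a sketch of the idiom, not code from the PR):

# Before: 'import onnx' at module scope makes the whole converters
# package unimportable when onnx is not installed.

# After: the import is deferred until an ONNX model is actually converted.
def onnx_to_hls(config):
    import onnx  # deferred import; fails here, where @requires can explain why

    onnx_model = onnx.load(config['OnnxModel'])
    ...
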
8 changes: 6 additions & 2 deletions hls4ml/converters/pytorch_to_hls.py
@@ -1,6 +1,5 @@
import torch

from hls4ml.model import ModelGraph
from hls4ml.utils.dependency import requires


class PyTorchModelReader:
@@ -24,8 +23,11 @@ def get_weights_data(self, layer_name, var_name):
return data


@requires('_torch')
class PyTorchFileReader(PyTorchModelReader): # Inherit get_weights_data method
def __init__(self, config):
import torch

self.config = config

if not torch.cuda.is_available():
@@ -103,6 +105,7 @@ def decorator(function):
# ----------------------------------------------------------------


@requires('_torch')
def parse_pytorch_model(config, verbose=True):
"""Convert PyTorch model to hls4ml ModelGraph.

@@ -368,6 +371,7 @@ def parse_pytorch_model(config, verbose=True):
return layer_list, input_layers


@requires('_torch')
def pytorch_to_hls(config):
layer_list, input_layers = parse_pytorch_model(config)
print('Creating HLS model')
7 changes: 0 additions & 7 deletions hls4ml/model/__init__.py
@@ -1,8 +1 @@
from hls4ml.model.graph import HLSConfig, ModelGraph # noqa: F401

try:
from hls4ml.model import profiling # noqa: F401

__profiling_enabled__ = True
except ImportError:
__profiling_enabled__ = False
3 changes: 2 additions & 1 deletion hls4ml/model/optimizer/passes/qkeras.py
@@ -1,5 +1,4 @@
import numpy as np
import tensorflow as tf

from hls4ml.model.layers import ApplyAlpha
from hls4ml.model.optimizer import ConfigurableOptimizerPass, OptimizerPass, register_pass
@@ -113,6 +112,8 @@ def match(self, node):
def transform(self, model, node):
# The quantizer has to be applied to set the scale attribute
# This must be applied to the _unquantized_ weights to obtain the correct scale
import tensorflow as tf

quantizer = node.weights['weight'].quantizer.quantizer_fn # get QKeras quantizer
weights = node.weights['weight'].data_unquantized # get weights
qweights = quantizer(tf.convert_to_tensor(weights))