diff --git a/.github/workflows/sphinx-build.yml b/.github/workflows/sphinx-build.yml
index 1bcdecc..fe8793f 100644
--- a/.github/workflows/sphinx-build.yml
+++ b/.github/workflows/sphinx-build.yml
@@ -39,11 +39,7 @@ jobs:
         run: |
           cd docs
           make html
-      - name: Upload artifact
-        uses: actions/upload-pages-artifact@v2
-        with:
-          # Upload entire repository
-          path: 'docs/_build/html'
+
   # Deployment job
   deploy:
     environment:
@@ -52,6 +48,10 @@
     runs-on: ubuntu-latest
     needs: build
     steps:
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v2
+        with:
+          path: 'docs/_build/html'
       - name: Deploy to GitHub Pages
         id: deployment
         uses: actions/deploy-pages@v2
diff --git a/docs/conf.py b/docs/conf.py
index d564878..9038aaf 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -56,7 +56,7 @@
     'display_github': True,      # Integrate GitHub
     'github_user': 'calad0i',    # Username
     'github_repo': "HGQ",        # Repo name
-    'github_version': 'v0.2',    # Version
+    'github_version': 'master',  # Version
     'conf_py_path': '/docs/',    # Path in the checkout to the docs root
 }
 
diff --git a/docs/getting_started.md b/docs/getting_started.md
index f7758b1..3f2b219 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -94,4 +94,4 @@ By almost bit-accurate, the model will be bit-accurate except for the following
 3. For activations, bit-accuracy cannot be guaranteed. A great example of this is `softmax`. Also, unary nonlinear activations may or may not be bit-accurate with the current hls4ml implementation. Currently, if the bitwidth is very high and the input value's range is greater than a certain value, bit-accuracy will be lost due to some hardcoded LUT size in hls4ml.
 ```
 
-For a complete example, please refer to this [notebook](https://github.com/calad0i/HGQ/tree/v0.2/examples/mnist.ipynb).
+For a complete example, please refer to this [notebook](https://github.com/calad0i/HGQ/tree/master/examples/mnist.ipynb).
diff --git a/docs/qkeras.md b/docs/qkeras.md
index 4507191..69d81f1 100644
--- a/docs/qkeras.md
+++ b/docs/qkeras.md
@@ -23,8 +23,16 @@ Due to the default behavior of `QKeras` quantizer, it is strongly recommended to
 
 Not all QKeras layers are not supported, such as `QConv*DBatchNorm`. If the model contains such layers, the conversion will fail.
 ```
+
 ```{warning}
-The pre-requisite of this conversion is that the `QKeras` model must be a model that may be converted to a hls4ml model in a bit-accurate manner. This means that the input must be followed directly by a `QActivation` layer with a `quantized_bits` activation. If this is not the case, the conversion will fail.
+The prerequisite for this conversion is that the `QKeras` model must be convertible to an hls4ml model in a bit-accurate manner. This has two major implications:
+
+1. All inputs must be followed immediately by a `QActivation` layer with a `quantized_bits` activation to mark the input precision.
+2. All quantizers, where applicable, must use `alpha=1`. The default arbitrary power-of-two scaling is NOT achievable in hls4ml.
 
-Also, if the quantizer for any parameter is not set, the framework will try to derive a bitwidth that will produce a bit-accurate model. However, this may result in a huge bitwidth **silently without warning**. Hence, before running synthesis, it is strongly recommended to check the bitwidth manually.
+If these conditions are not met, the conversion will fail.
+
+Also, if the quantizer for any parameter is missing, the framework will try to derive a bitwidth that will produce a bit-accurate model. However, this may result in a huge bitwidth **silently, without warning**. Hence, before running synthesis, it is strongly recommended to check the derived bitwidths manually.
 ```
+
+You can find an example of this conversion in [this notebook](https://github.com/calad0i/HGQ/tree/master/examples/qkeras.ipynb).
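As a reference for the prerequisites added to `docs/qkeras.md` above, the following is a minimal sketch of a QKeras model that satisfies them. The layer sizes and bitwidths are arbitrary placeholders chosen for illustration, not values taken from the repository; the relevant points are the `QActivation(quantized_bits(...))` placed directly after the input and the explicit `alpha=1` on every quantizer.

```python
# Minimal sketch of a QKeras model meeting the stated prerequisites.
# Widths and bitwidths below are illustrative placeholders only.
from tensorflow import keras
from qkeras import QActivation, QDense, quantized_bits, quantized_relu

inputs = keras.Input(shape=(16,))
# Prerequisite 1: mark the input precision with a quantized_bits QActivation
# immediately after the input layer.
x = QActivation(quantized_bits(8, 0, alpha=1))(inputs)
# Prerequisite 2: every kernel/bias quantizer uses alpha=1 so the scaling is
# representable in hls4ml.
x = QDense(32,
           kernel_quantizer=quantized_bits(6, 0, alpha=1),
           bias_quantizer=quantized_bits(6, 0, alpha=1))(x)
x = QActivation(quantized_relu(6, 0))(x)
outputs = QDense(10,
                 kernel_quantizer=quantized_bits(6, 0, alpha=1),
                 bias_quantizer=quantized_bits(6, 0, alpha=1))(x)
model = keras.Model(inputs, outputs)
```

A model structured like this should be convertible in the bit-accurate manner the warning describes; omitting the leading `QActivation` or using a non-unit `alpha` is the situation the warning flags as causing the conversion to fail, while leaving a parameter's quantizer unset is what can silently inflate the derived bitwidth.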