diff --git a/.github/workflows/sphinx-build.yml b/.github/workflows/sphinx-build.yml
index 40a3804..1bcdecc 100644
--- a/.github/workflows/sphinx-build.yml
+++ b/.github/workflows/sphinx-build.yml
@@ -1,4 +1,4 @@
-name: Sphinx document with GitHub Pages dependencies preinstalled
+name: Documentation
 
 on:
   # Runs on pushes targeting the default branch
diff --git a/README.md b/README.md
index 73b1573..e9f787d 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,11 @@
 
 # High Granularity Quantization
 
+[![License Apache 2.0](https://img.shields.io/badge/license-Apache%202.0-green.svg)](LICENSE)
+[![Documentation](https://github.com/calad0i/HGQ/actions/workflows/sphinx-build.yml/badge.svg)](https://calad0i.github.io/HGQ/)
+[![PyPI version](https://badge.fury.io/py/hgq.svg)](https://badge.fury.io/py/hgq)
+
+
 HGQ is a framework for quantization aware training of neural networks to be deployed on FPGAs, which allows for per-weight and per-activation bitwidth optimization.
 
 Depending on the specific [application](https://arxiv.org/abs/2006.10159), HGQ could achieve up to 10x resource reduction compared to the traditional `AutoQkeras` approach, while maintaining the same accuracy. For some more challenging [tasks](https://arxiv.org/abs/2202.04976), where the model is already under-fitted, HGQ could still improve the performance under the same on-board resource consumption. For more details, please refer to our paper (link coming not too soon).
diff --git a/pyproject.toml b/pyproject.toml
index cb3828b..e49a9ea 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "HGQ"
-version = "0.2.0-rc1"
+version = "0.2.0b1"
 authors = [{ name = "Chang Sun", email = "chsun@cern.ch" }]
-description = "High Granularity Quantizarion"
+description = "High Granularity Quantization"
 readme = "README.md"
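
The README hunk above describes HGQ's quantization-aware training with per-weight and per-activation bitwidth optimization. A minimal training sketch of that workflow follows, assuming the `HQuantize`/`HDense` layer names, the `beta` regularization factor, and the `ResetMinMax`/`FreeBOPs` callbacks as described in the HGQ 0.2 documentation; treat all names and signatures here as assumptions rather than verified API:

```python
# Minimal HGQ quantization-aware-training sketch (assumed API, per HGQ 0.2 docs).
# `beta` weights a differentiable resource (BOPs) regularizer, so bitwidths are
# learned jointly with the weights during ordinary gradient descent.
from tensorflow import keras
from HGQ.layers import HQuantize, HDense   # assumed import path
from HGQ import ResetMinMax, FreeBOPs      # assumed callback names

beta = 1e-5  # resource-regularization strength; larger values push toward fewer bits
model = keras.Sequential([
    HQuantize(beta=beta),                  # quantize the network input
    HDense(32, beta=beta, activation='relu'),
    HDense(10, beta=beta),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')

# ResetMinMax re-derives activation ranges each epoch; FreeBOPs logs the BOPs estimate.
callbacks = [ResetMinMax(), FreeBOPs()]
# model.fit(x_train, y_train, epochs=10, callbacks=callbacks)
```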
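
On the pyproject.toml hunk: the version string moves backwards under PEP 440 ordering. `0.2.0-rc1` normalizes to `0.2.0rc1` (a release candidate), while `0.2.0b1` is a beta, and betas sort before release candidates (a < b < rc < final), so a published `0.2.0rc1` would take precedence over `0.2.0b1` when pre-releases are allowed. A quick check with the `packaging` library:

```python
from packaging.version import Version

old = Version("0.2.0-rc1")       # PEP 440 normalizes this to 0.2.0rc1
new = Version("0.2.0b1")         # beta pre-release

print(old, new)                  # 0.2.0rc1 0.2.0b1
print(new < old)                 # True: beta sorts before release candidate
print(new < Version("0.2.0"))    # True: pre-releases precede the final release
```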