diff --git a/README.md b/README.md
index a705288a..5f645b7d 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,9 @@ Now, there are multiple implementations available supporting different precision
 We support 3 boards for hardware acceleration which are Pynq-Z1, Pynq-Z2 and Ultra96 (with PYNQ image).
+Note: this repository has been archived and is no longer actively maintained.
+If you rely on this repository, we strongly recommend switching to the [FINN compiler](https://github.com/xilinx/finn).
+
## Citation
If you find BNN-PYNQ useful, please cite the FINN paper:
@@ -74,5 +77,5 @@ Following the step-by-step instructions:
 - hls-syn: contains the Vivado HLS generated RTL and IP (in the subfolder named as the target network and target platform);
 - report: contains the Vivado and Vivado HLS reports;
 - vivado: contains the Vivado project;
-6. Copy the generated bitstream and tcl script on the PYNQ board `pip_installation_path/bnn/bitstreams/`
+6. Copy the generated bitstream, hwh and tcl files to the PYNQ board under `pip_installation_path/bnn/bitstreams/`
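
For reference, step 6 boils down to copying the three generated files for a given network and platform onto the board. A minimal sketch using one of the file names added below; the board user and address are assumptions, and pip_installation_path is the README's own placeholder:

    # Copy the bitstream, hwh and tcl for cnvW1A1 on Pynq-Z1/Z2 to the board.
    # xilinx@192.168.2.99 is an assumed board address; adjust for your setup.
    scp cnvW1A1-pynqZ1-Z2.bit cnvW1A1-pynqZ1-Z2.hwh cnvW1A1-pynqZ1-Z2.tcl \
        xilinx@192.168.2.99:pip_installation_path/bnn/bitstreams/pynqZ1-Z2/
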
diff --git a/bnn/bitstreams/pynqZ1-Z2/cnvW1A1-pynqZ1-Z2.hwh b/bnn/bitstreams/pynqZ1-Z2/cnvW1A1-pynqZ1-Z2.hwh
new file mode 100755
index 00000000..576bdae2
--- /dev/null
+++ b/bnn/bitstreams/pynqZ1-Z2/cnvW1A1-pynqZ1-Z2.hwh
@@ -0,0 +1,4279 @@
[... 4,279 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/pynqZ1-Z2/cnvW1A2-pynqZ1-Z2.hwh b/bnn/bitstreams/pynqZ1-Z2/cnvW1A2-pynqZ1-Z2.hwh
new file mode 100755
index 00000000..3477b116
--- /dev/null
+++ b/bnn/bitstreams/pynqZ1-Z2/cnvW1A2-pynqZ1-Z2.hwh
@@ -0,0 +1,4279 @@
[... 4,279 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/pynqZ1-Z2/cnvW2A2-pynqZ1-Z2.hwh b/bnn/bitstreams/pynqZ1-Z2/cnvW2A2-pynqZ1-Z2.hwh
new file mode 100755
index 00000000..5aad9192
--- /dev/null
+++ b/bnn/bitstreams/pynqZ1-Z2/cnvW2A2-pynqZ1-Z2.hwh
@@ -0,0 +1,4279 @@
[... 4,279 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/pynqZ1-Z2/lfcW1A1-pynqZ1-Z2.hwh b/bnn/bitstreams/pynqZ1-Z2/lfcW1A1-pynqZ1-Z2.hwh
new file mode 100755
index 00000000..de6ebca0
--- /dev/null
+++ b/bnn/bitstreams/pynqZ1-Z2/lfcW1A1-pynqZ1-Z2.hwh
@@ -0,0 +1,4279 @@
[... 4,279 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/pynqZ1-Z2/lfcW1A2-pynqZ1-Z2.hwh b/bnn/bitstreams/pynqZ1-Z2/lfcW1A2-pynqZ1-Z2.hwh
new file mode 100755
index 00000000..483a6d10
--- /dev/null
+++ b/bnn/bitstreams/pynqZ1-Z2/lfcW1A2-pynqZ1-Z2.hwh
@@ -0,0 +1,4279 @@
[... 4,279 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/ultra96/cnvW1A1-ultra96.hwh b/bnn/bitstreams/ultra96/cnvW1A1-ultra96.hwh
new file mode 100644
index 00000000..712f6331
--- /dev/null
+++ b/bnn/bitstreams/ultra96/cnvW1A1-ultra96.hwh
@@ -0,0 +1,4572 @@
[... 4,572 added lines of XML hardware handoff (.hwh) content, not preserved in this extract ...]
diff --git a/bnn/bitstreams/ultra96/cnvW1A2-ultra96.hwh b/bnn/bitstreams/ultra96/cnvW1A2-ultra96.hwh
new file mode 100644
index 00000000..1413dabf
--- /dev/null
+++ b/bnn/bitstreams/ultra96/cnvW1A2-ultra96.hwh
@@ -0,0 +1,4572 @@
+[4,572 lines of machine-generated hardware handoff (HWH) XML omitted]
diff --git a/bnn/bitstreams/ultra96/cnvW2A2-ultra96.hwh b/bnn/bitstreams/ultra96/cnvW2A2-ultra96.hwh
new file mode 100644
index 00000000..bbb304ba
--- /dev/null
+++ b/bnn/bitstreams/ultra96/cnvW2A2-ultra96.hwh
@@ -0,0 +1,4572 @@
+[4,572 lines of machine-generated hardware handoff (HWH) XML omitted]
diff --git a/bnn/bitstreams/ultra96/lfcW1A1-ultra96.hwh b/bnn/bitstreams/ultra96/lfcW1A1-ultra96.hwh
new file mode 100644
index 00000000..e90548fb
--- /dev/null
+++ b/bnn/bitstreams/ultra96/lfcW1A1-ultra96.hwh
@@ -0,0 +1,4572 @@
+[4,572 lines of machine-generated hardware handoff (HWH) XML omitted]
diff --git a/bnn/bitstreams/ultra96/lfcW1A2-ultra96.hwh b/bnn/bitstreams/ultra96/lfcW1A2-ultra96.hwh
new file mode 100644
index 00000000..6eca85f2
--- /dev/null
+++ b/bnn/bitstreams/ultra96/lfcW1A2-ultra96.hwh
@@ -0,0 +1,4572 @@
+[4,572 lines of machine-generated hardware handoff (HWH) XML omitted]
diff --git a/bnn/src/library/finn-hlslib b/bnn/src/library/finn-hlslib
index a868d8df..8b7f5f5d 160000
--- a/bnn/src/library/finn-hlslib
+++ b/bnn/src/library/finn-hlslib
@@ -1 +1 @@
-Subproject commit a868d8dfa7799753d262cc5476cce19d276bcdc2
+Subproject commit 8b7f5f5dcd4e3dd17eddc41253040931af28179a
diff --git a/bnn/src/network/cnvW1A2/hw/config.h b/bnn/src/network/cnvW1A2/hw/config.h
index 5324bf78..88070938 100755
--- a/bnn/src/network/cnvW1A2/hw/config.h
+++ b/bnn/src/network/cnvW1A2/hw/config.h
@@ -197,7 +197,7 @@
#define L8_MW 512
#define L8_MH 64
#define L8_WPI 1
-#define L8_API 1
+#define L8_API 16
#define L8_WPF 0
#define L8_APF 0
diff --git a/bnn/src/network/cnvW2A2/hw/config.h b/bnn/src/network/cnvW2A2/hw/config.h
index 7f56f114..b12d0ec2 100755
--- a/bnn/src/network/cnvW2A2/hw/config.h
+++ b/bnn/src/network/cnvW2A2/hw/config.h
@@ -197,7 +197,7 @@
#define L8_MW 512
#define L8_MH 64
#define L8_WPI 2
-#define L8_API 1
+#define L8_API 16
#define L8_WPF 0
#define L8_APF 0
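Note: the `L8_API 1 -> 16` change in both `config.h` files widens the activation precision of the last (output) layer from 1 to 16 bits, mirroring the `ActivationPrecisions_integer = [..., 16]` edits in the weight-generation scripts further down. A minimal sketch of how such a define is derived from the script's per-layer list (variable names follow the scripts; the snippet itself is illustrative, not the actual finnthesizer code):

```python
# Illustrative only: layer 8's activation precision drives the L8_API define.
ActivationPrecisions_integer = [1, 1, 1, 1, 1, 1, 1, 1, 16]
layer = 8
print("#define L%d_API %d" % (layer, ActivationPrecisions_integer[layer]))
# -> #define L8_API 16
```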
diff --git a/bnn/src/network/make-hw.sh b/bnn/src/network/make-hw.sh
index f8132932..ca62bd8f 100755
--- a/bnn/src/network/make-hw.sh
+++ b/bnn/src/network/make-hw.sh
@@ -165,6 +165,7 @@ VIVADO_OUT_DIR="$BNN_PATH/output/vivado/$TARGET_NAME"
BITSTREAM_PATH="$BNN_PATH/output/bitstream"
TARGET_BITSTREAM="$BITSTREAM_PATH/$NETWORK-$PLATFORM.bit"
TARGET_TCL="$BITSTREAM_PATH/$NETWORK-$PLATFORM.tcl"
+TARGET_HWH="$BITSTREAM_PATH/$NETWORK-$PLATFORM.hwh"
if [[ ("$MODE" == "b") || ("$MODE" == "a") ]]; then
mkdir -p "$BNN_PATH/output/vivado"
@@ -183,6 +184,7 @@ if [[ ("$MODE" == "b") || ("$MODE" == "a") ]]; then
vivado -mode batch -notrace -source $VIVADO_SCRIPT -tclargs $HLS_IP_REPO $TARGET_NAME $VIVADO_OUT_DIR $VIVADO_SCRIPT_DIR
cp -f "$VIVADO_OUT_DIR/$TARGET_NAME.runs/impl_1/procsys_wrapper.bit" $TARGET_BITSTREAM
cp -f "$VIVADO_OUT_DIR/procsys.tcl" $TARGET_TCL
+ cp -f "$VIVADO_OUT_DIR/$TARGET_NAME.srcs/sources_1/bd/procsys/hw_handoff/procsys.hwh" $TARGET_HWH
# extract parts of the post-implementation reports
cat "$VIVADO_OUT_DIR/$TARGET_NAME.runs/impl_1/procsys_wrapper_timing_summary_routed.rpt" | grep "| Design Timing Summary" -B 3 -A 10 > $REPORT_OUT_DIR/vivado.txt
cat "$VIVADO_OUT_DIR/$TARGET_NAME.runs/impl_1/procsys_wrapper_utilization_placed.rpt" | grep "| Slice LUTs" -B 3 -A 11 >> $REPORT_OUT_DIR/vivado.txt
diff --git a/bnn/src/training/cifar10-gen-weights-W1A1.py b/bnn/src/training/cifar10-gen-weights-W1A1.py
index 1cdfc26a..3afc692c 100644
--- a/bnn/src/training/cifar10-gen-weights-W1A1.py
+++ b/bnn/src/training/cifar10-gen-weights-W1A1.py
@@ -1,43 +1,42 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
from finnthesizer import *
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/cifar10-1w-1a.npz"
- targetDirBin = bnnRoot + "/binparam-cnvW1A1-pynq"
- targetDirHLS = bnnRoot + "/binparam-cnvW1A1-pynq/hw"
+ targetDirBin = bnnRoot + "/cnvW1A1"
+ targetDirHLS = bnnRoot + "/cnvW1A1/hw"
#topology of convolutional layers (only for config.h defines)
ifm = [32, 30, 14, 12, 5, 3]
@@ -46,23 +45,25 @@
ofm_ch = [64, 64, 128, 128, 256, 256]
filterDim = [ 3, 3, 3, 3, 3, 3]
- WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- WeightsPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
- ActivationPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
- InputPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
+ WeightsPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
+ WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
+ InputPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
+ InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
+ ActivationPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 16]
+ ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
classes = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
#configuration of PE and SIMD counts
- peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
- simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
+ peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
+ simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
if not os.path.exists(targetDirBin):
- os.mkdir(targetDirBin)
+ os.mkdir(targetDirBin)
if not os.path.exists(targetDirHLS):
- os.mkdir(targetDirHLS)
+ os.mkdir(targetDirHLS)
#read weights
rHW = BNNWeightReader(npzFile, True)
@@ -71,116 +72,94 @@
config+= " * Finnthesizer Config-File Generation\n";
config+= " *\n **/\n\n"
config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
-
# process convolutional layers
for convl in range(0, 6):
- peCount = peCounts[convl]
- simdCount = simdCounts[convl]
- WPrecision_fractional = WeightsPrecisions_fractional[convl]
- APrecision_fractional = ActivationPrecisions_fractional[convl]
- IPrecision_fractional = InputPrecisions_fractional[convl]
- WPrecision_integer = WeightsPrecisions_integer[convl]
- APrecision_integer = ActivationPrecisions_integer[convl]
- IPrecision_integer = InputPrecisions_integer[convl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl)
- if convl == 0:
+ peCount = peCounts[convl]
+ simdCount = simdCounts[convl]
+ WPrecision_fractional = WeightsPrecisions_fractional[convl]
+ APrecision_fractional = ActivationPrecisions_fractional[convl]
+ IPrecision_fractional = InputPrecisions_fractional[convl]
+ WPrecision_integer = WeightsPrecisions_integer[convl]
+ APrecision_integer = ActivationPrecisions_integer[convl]
+ IPrecision_integer = InputPrecisions_integer[convl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl))
# use fixed point weights for the first layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer, usePopCount=False, numThresBits=24, numThresIntBits=16)
+ (usePopCount, numThresBits, numThresIntBits) = (False, 24, 16) if convl==0 else (True, 16, None)
+ (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
# compute the padded width and height
paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (convl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
+ WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+
m.addMatrix(w,t,paddedW,paddedH)
-
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, \
+ peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
m.createBinFiles(targetDirBin, str(convl))
- else:
- # regular binarized layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
+ # process fully-connected layers
+ for fcl in range(6,9):
+ peCount = peCounts[fcl]
+ simdCount = simdCounts[fcl]
+ WPrecision_fractional = WeightsPrecisions_fractional[fcl]
+ APrecision_fractional = ActivationPrecisions_fractional[fcl]
+ IPrecision_fractional = InputPrecisions_fractional[fcl]
+ WPrecision_integer = WeightsPrecisions_integer[fcl]
+ APrecision_integer = ActivationPrecisions_integer[fcl]
+ IPrecision_integer = InputPrecisions_integer[fcl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl))
+ if fcl == 8:
+ (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], 64)
+ useThresholds = False
+ else:
+ (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], peCount)
+ useThresholds = True
# compute the padded width and height
- paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (fcl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,\
+ WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
+ WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl), useThresholds)
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(convl))
-
- # process fully-connected layers
- for fcl in range(6,9):
- peCount = peCounts[fcl]
- simdCount = simdCounts[fcl]
- WPrecision_fractional = WeightsPrecisions_fractional[fcl]
- APrecision_fractional = ActivationPrecisions_fractional[fcl]
- IPrecision_fractional = InputPrecisions_fractional[fcl]
- WPrecision_integer = WeightsPrecisions_integer[fcl]
- APrecision_integer = ActivationPrecisions_integer[fcl]
- IPrecision_integer = InputPrecisions_integer[fcl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl)
-
- if fcl == 8:
- (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], 64)
- useThresholds = False
- else:
- (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], peCount)
- useThresholds = True
+ m.createBinFiles(targetDirBin, str(fcl), useThresholds)
- # compute the padded width and height
- paddedW = padTo(w.shape[1], simdCount)
- # compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (fcl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
- m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl))
-
- #generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(fcl), useThresholds)
-
config+="#endif //__LAYER_CONFIG_H_\n"
-
configFile = open(targetDirHLS+"/config.h", "w")
configFile.write(config)
configFile.close()
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))
-
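Note: most of the churn in these training scripts is a Python 2 to 3 migration: `print` statements become function calls, and the weight/threshold memory sizing switches from `/` to `//` so the results stay integers under Python 3's true division. A minimal standalone sketch of that construct (the values are made up):

```python
# Python 3: "/" is true division, so sizing math must use floor division.
paddedW, paddedH = 96, 64      # padded matrix dimensions (example values)
simdCount, peCount = 32, 16    # folding factors (example values)
neededWMem = (paddedW * paddedH) // (simdCount * peCount)
print("WMem = %d" % neededWMem)  # -> WMem = 12
```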
diff --git a/bnn/src/training/cifar10-gen-weights-W1A2.py b/bnn/src/training/cifar10-gen-weights-W1A2.py
index 1b3f03de..f8689dec 100644
--- a/bnn/src/training/cifar10-gen-weights-W1A2.py
+++ b/bnn/src/training/cifar10-gen-weights-W1A2.py
@@ -1,43 +1,42 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
from finnthesizer import *
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/cifar10-1w-2a.npz"
- targetDirBin = bnnRoot + "/binparam-cnvW1A2-pynq"
- targetDirHLS = bnnRoot + "/binparam-cnvW1A2-pynq/hw"
+ targetDirBin = bnnRoot + "/cnvW1A2"
+ targetDirHLS = bnnRoot + "/cnvW1A2/hw"
#topology of convolutional layers (only for config.h defines)
ifm = [32, 30, 14, 12, 5, 3]
@@ -46,137 +45,122 @@
ofm_ch = [64, 64, 128, 128, 256, 256]
filterDim = [ 3, 3, 3, 3, 3, 3]
- WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
WeightsPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
- ActivationPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 1]
+ WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
InputPrecisions_integer = [1 , 2 , 2 , 2 , 2 , 2 , 2, 2, 2]
+ InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
+ ActivationPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 16]
+ ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
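+    # the final classifier (engine 8) keeps its raw accumulator as a 16-bit
+    # output: it is read via readFCBNComplex_no_thresholds and written with
+    # useThresholds = False below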
classes = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
#configuration of PE and SIMD counts
peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
- simdCounts = [3 , 32, 32, 32, 32, 32, 4, 8, 1]
+ simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
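+    # each layer's weight matrix is tiled over a peCount x simdCount PE array;
+    # e.g. engine 1 folds its 64 x 576 matrix (64 output channels, 64*3*3
+    # inputs) onto 32 PEs x 32 SIMD lanes, so WMem = 64*576/(32*32) = 36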
if not os.path.exists(targetDirBin):
- os.mkdir(targetDirBin)
+ os.mkdir(targetDirBin)
if not os.path.exists(targetDirHLS):
- os.mkdir(targetDirHLS)
+ os.mkdir(targetDirHLS)
+
+ #read weights
rHW = BNNWeightReader(npzFile, True)
config = "/**\n"
config+= " * Finnthesizer Config-File Generation\n";
config+= " *\n **/\n\n"
config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
-
- # process convolutional layers
+
+ # process convolutional layers
for convl in range(0, 6):
- peCount = peCounts[convl]
- simdCount = simdCounts[convl]
- WPrecision_fractional = WeightsPrecisions_fractional[convl]
- APrecision_fractional = ActivationPrecisions_fractional[convl]
- IPrecision_fractional = InputPrecisions_fractional[convl]
- WPrecision_integer = WeightsPrecisions_integer[convl]
- APrecision_integer = ActivationPrecisions_integer[convl]
- IPrecision_integer = InputPrecisions_integer[convl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl)
- if convl == 0:
+ peCount = peCounts[convl]
+ simdCount = simdCounts[convl]
+ WPrecision_fractional = WeightsPrecisions_fractional[convl]
+ APrecision_fractional = ActivationPrecisions_fractional[convl]
+ IPrecision_fractional = InputPrecisions_fractional[convl]
+ WPrecision_integer = WeightsPrecisions_integer[convl]
+ APrecision_integer = ActivationPrecisions_integer[convl]
+ IPrecision_integer = InputPrecisions_integer[convl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl))
# use fixed point weights for the first layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer, usePopCount=False, numThresBits=24, numThresIntBits=16)
+ (usePopCount, numThresBits, numThresIntBits) = (False, 24, 16) if convl==0 else (False, 16, None)
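+        # layer 0 consumes 1.7 fixed-point inputs (see InputPrecisions above),
+        # so its thresholds need 24 bits with 16 integer bits; later layers
+        # use plain 16-bit integer thresholds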
+ (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
# compute the padded width and height
paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (convl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
+ WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, \
+ peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
+
#generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
+ #because HLS might not work for very large header files
m.createBinFiles(targetDirBin, str(convl))
- else:
- # regular binarized layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
+ # process fully-connected layers
+ for fcl in range(6,9):
+ peCount = peCounts[fcl]
+ simdCount = simdCounts[fcl]
+ WPrecision_fractional = WeightsPrecisions_fractional[fcl]
+ APrecision_fractional = ActivationPrecisions_fractional[fcl]
+ IPrecision_fractional = InputPrecisions_fractional[fcl]
+ WPrecision_integer = WeightsPrecisions_integer[fcl]
+ APrecision_integer = ActivationPrecisions_integer[fcl]
+ IPrecision_integer = InputPrecisions_integer[fcl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl))
+ if fcl == 8:
+ (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], 64)
+ useThresholds = False
+ else:
+ (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], peCount)
+ useThresholds = True
# compute the padded width and height
- paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (fcl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,\
+ WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
+ config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
+ WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
#generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl), useThresholds)
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(convl))
-
- # process fully-connected layers
- for fcl in range(6,9):
- peCount = peCounts[fcl]
- simdCount = simdCounts[fcl]
- WPrecision_fractional = WeightsPrecisions_fractional[fcl]
- APrecision_fractional = ActivationPrecisions_fractional[fcl]
- IPrecision_fractional = InputPrecisions_fractional[fcl]
- WPrecision_integer = WeightsPrecisions_integer[fcl]
- APrecision_integer = ActivationPrecisions_integer[fcl]
- IPrecision_integer = InputPrecisions_integer[fcl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl)
-
- if fcl == 8:
- (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], 64)
- useThresholds = False
- else:
- (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], peCount)
- useThresholds = True
-
- paddedW = padTo(w.shape[1], simdCount)
- # compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (fcl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
- m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl))
-
- #generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(fcl), useThresholds)
+ m.createBinFiles(targetDirBin, str(fcl), useThresholds)
config+="#endif //__LAYER_CONFIG_H_\n"
-
configFile = open(targetDirHLS+"/config.h", "w")
configFile.write(config)
configFile.close()
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))
-
diff --git a/bnn/src/training/cifar10-gen-weights-W2A2.py b/bnn/src/training/cifar10-gen-weights-W2A2.py
index 1df7b920..92431c5c 100644
--- a/bnn/src/training/cifar10-gen-weights-W2A2.py
+++ b/bnn/src/training/cifar10-gen-weights-W2A2.py
@@ -1,44 +1,42 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
from finnthesizer import *
if __name__ == "__main__":
-
bnnRoot = "."
npzFile = bnnRoot + "/cifar10-2w-2a.npz"
- targetDirBin = bnnRoot + "/binparam-cnvW2A2-pynq"
- targetDirHLS = bnnRoot + "/binparam-cnvW2A2-pynq/hw"
+ targetDirBin = bnnRoot + "/cnvW2A2"
+ targetDirHLS = bnnRoot + "/cnvW2A2/hw"
#topology of convolutional layers (only for config.h defines)
ifm = [32, 30, 14, 12, 5, 3]
@@ -47,12 +45,14 @@
ofm_ch = [64, 64, 128, 128, 256, 256]
filterDim = [ 3, 3, 3, 3, 3, 3]
- WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
- InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
WeightsPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 2]
- ActivationPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 1]
+ WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
InputPrecisions_integer = [1 , 2 , 2 , 2 , 2 , 2 , 2, 2, 2]
+ InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
+
+ ActivationPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 16]
+ ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
classes = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
@@ -61,9 +61,11 @@
simdCounts = [3, 16, 16, 16, 8, 8, 2, 2, 1]
if not os.path.exists(targetDirBin):
- os.mkdir(targetDirBin)
+ os.mkdir(targetDirBin)
if not os.path.exists(targetDirHLS):
- os.mkdir(targetDirHLS)
+ os.mkdir(targetDirHLS)
+
+ #read weights
rHW = BNNWeightReader(npzFile, True)
config = "/**\n"
@@ -71,108 +73,91 @@
config+= " *\n **/\n\n"
config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
- #process convolutional layers
+ # process convolutional layers
for convl in range(0, 6):
- peCount = peCounts[convl]
- simdCount = simdCounts[convl]
- WPrecision_fractional = WeightsPrecisions_fractional[convl]
- APrecision_fractional = ActivationPrecisions_fractional[convl]
- IPrecision_fractional = InputPrecisions_fractional[convl]
- WPrecision_integer = WeightsPrecisions_integer[convl]
- APrecision_integer = ActivationPrecisions_integer[convl]
- IPrecision_integer = InputPrecisions_integer[convl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl)
- if convl == 0:
+ peCount = peCounts[convl]
+ simdCount = simdCounts[convl]
+ WPrecision_fractional = WeightsPrecisions_fractional[convl]
+ APrecision_fractional = ActivationPrecisions_fractional[convl]
+ IPrecision_fractional = InputPrecisions_fractional[convl]
+ WPrecision_integer = WeightsPrecisions_integer[convl]
+ APrecision_integer = ActivationPrecisions_integer[convl]
+ IPrecision_integer = InputPrecisions_integer[convl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl))
# use fixed point weights for the first layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer, usePopCount=False, numThresBits=24, numThresIntBits=16)
+ (usePopCount, numThresBits, numThresIntBits) = (False, 24, 16) if convl==0 else (False, 16, None)
+ (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
# compute the padded width and height
paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (convl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
+ WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, \
+ peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
+
#generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
+ #because HLS might not work for very large header files
m.createBinFiles(targetDirBin, str(convl))
- else:
- # regular binarized layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
+
+ # process fully-connected layers
+ for fcl in range(6,9):
+ peCount = peCounts[fcl]
+ simdCount = simdCounts[fcl]
+ WPrecision_fractional = WeightsPrecisions_fractional[fcl]
+ APrecision_fractional = ActivationPrecisions_fractional[fcl]
+ IPrecision_fractional = InputPrecisions_fractional[fcl]
+ WPrecision_integer = WeightsPrecisions_integer[fcl]
+ APrecision_integer = ActivationPrecisions_integer[fcl]
+ IPrecision_integer = InputPrecisions_integer[fcl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl))
+ if fcl == 8:
+ (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], 64)
+ useThresholds = False
+ else:
+ (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], peCount)
+ useThresholds = True
# compute the padded width and height
- paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (fcl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,\
+ WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
+ config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
+ WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
#generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl), useThresholds)
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(convl))
-
- # process fully-connected layers
- for fcl in range(6,9):
- peCount = peCounts[fcl]
- simdCount = simdCounts[fcl]
- WPrecision_fractional = WeightsPrecisions_fractional[fcl]
- APrecision_fractional = ActivationPrecisions_fractional[fcl]
- IPrecision_fractional = InputPrecisions_fractional[fcl]
- WPrecision_integer = WeightsPrecisions_integer[fcl]
- APrecision_integer = ActivationPrecisions_integer[fcl]
- IPrecision_integer = InputPrecisions_integer[fcl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl)
-
- if fcl == 8:
- (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], 64)
- useThresholds = False
- else:
- (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], peCount)
- useThresholds = True
-
- paddedW = padTo(w.shape[1], simdCount)
- # compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (fcl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
- m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl))
-
- #generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(fcl), useThresholds)
+ m.createBinFiles(targetDirBin, str(fcl), useThresholds)
config+="#endif //__LAYER_CONFIG_H_\n"
-
configFile = open(targetDirHLS+"/config.h", "w")
configFile.write(config)
configFile.close()
diff --git a/bnn/src/training/finnthesizer.py b/bnn/src/training/finnthesizer.py
old mode 100644
new mode 100755
index 66e9e766..4744c2b5
--- a/bnn/src/training/finnthesizer.py
+++ b/bnn/src/training/finnthesizer.py
@@ -1,485 +1,397 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import numpy as np
-import os
-import sys
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+__authors__ = "Ussama Zahid, Giulio Gambardella, Yaman Umuroglu, Nicholas Fraser, Christoph Doehring"
+__copyright__ = "Copyright 2020, Xilinx"
+
+import os
+import numpy as np
+from functools import reduce
# convenience function to prepare a fully-connected BNN for FINN
# with given number of (SIMD, PE) per layer
# and arbitrary precisions for both weights and activations
-def convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer):
- numLayers = len(simdCounts)
- if not os.path.exists(targetDirBin):
- os.mkdir(targetDirBin)
- if not os.path.exists(targetDirHLS):
- os.mkdir(targetDirHLS)
- # instantiate the weight reader, note how interleaveChannels=False for a
- # fully connected network
- r = BNNWeightReader(npzFile, False)
-
- config = "/**\n"
- config+= " * Finnthesizer Config-File Generation\n";
- config+= " *\n **/\n\n"
- config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
-
- for l in range(numLayers):
- print "process layer" + str(l)
- simdCount = simdCounts[l]
- peCount = peCounts[l]
- WPrecision_fractional = WeightsPrecisions_fractional[l]
- APrecision_fractional = ActivationPrecisions_fractional[l]
- IPrecision_fractional = InputPrecisions_fractional[l]
- WPrecision_integer = WeightsPrecisions_integer[l]
- APrecision_integer = ActivationPrecisions_integer[l]
- IPrecision_integer = InputPrecisions_integer[l]
-
- # read out weights and thresholds
- (w,t) = r.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- # compute the padded width and height
- paddedH = padTo(w.shape[0], peCount)
- paddedW = padTo(w.shape[1], simdCount)
- if (l==0): # for the first layer, we pad to multiple of 64 due to the AXI interface
- paddedW = padTo(w.shape[1], max(simdCount,64))
- if (l==numLayers-1): # for the last layer, we pad to multiple of 64 due to the AXI interface
- paddedH = padTo(w.shape[0], max(peCount,64))
-
- # compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
-
- print "Layer %d: %d x %d, SIMD = %d, PE = %d" % (l, paddedH, paddedW, simdCount, peCount)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
- # instantiate PE memory generator
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
- #add layer to config
- config += (printFCDefines("L%d" % l, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
- # pack parameters into PE memory
- m.addMatrix(w,t,paddedW,paddedH)
-
- # create HLS weight init files for initializing memory contents directly
- # while generating the bitstream
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(l) + ".h", str(l))
-
- # create binary weight files -- useful for runtime initialization since
- # HLS might freeze / not work for very large header files
- # note that it will still be necessary to declare the PE memories in
- m.createBinFiles(targetDirBin, str(l))
+def convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, \
+ WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, \
+ WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer):
+
+ numLayers = len(simdCounts)
+ if not os.path.exists(targetDirBin):
+ os.mkdir(targetDirBin)
+ if not os.path.exists(targetDirHLS):
+ os.mkdir(targetDirHLS)
+ # instantiate the weight reader, note how interleaveChannels=False for a
+ # fully connected network
+ r = BNNWeightReader(npzFile, False)
+ config = "/**\n"
+ config+= " * Finnthesizer Config-File Generation\n";
+ config+= " *\n **/\n\n"
+ config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
+ for l in range(numLayers):
+        print("process layer " + str(l))
+ simdCount = simdCounts[l]
+ peCount = peCounts[l]
+ WPrecision_fractional = WeightsPrecisions_fractional[l]
+ APrecision_fractional = ActivationPrecisions_fractional[l]
+ IPrecision_fractional = InputPrecisions_fractional[l]
+ WPrecision_integer = WeightsPrecisions_integer[l]
+ APrecision_integer = ActivationPrecisions_integer[l]
+ IPrecision_integer = InputPrecisions_integer[l]
+ # read out weights and thresholds
+ (w,t) = r.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ # compute the padded width and height
+ paddedH = padTo(w.shape[0], peCount)
+ paddedW = padTo(w.shape[1], simdCount)
+ if (l==0): # for the first layer, we pad to multiple of 64 due to the AXI interface
+ paddedW = padTo(w.shape[1], max(simdCount,64))
+ if (l==numLayers-1): # for the last layer, we pad to multiple of 64 due to the AXI interface
+ paddedH = padTo(w.shape[0], max(peCount,64))
+ # compute memory needed for weights and thresholds
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d, SIMD = %d, PE = %d" % (l, paddedH, paddedW, simdCount, peCount))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
+ WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ # instantiate PE memory generator
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ #add layer to config
+ config += (printFCDefines("L%d" % l, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
+ WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ # pack parameters into PE memory
+ m.addMatrix(w,t,paddedW,paddedH)
+
+ # create HLS weight init files for initializing memory contents directly
+ # while generating the bitstream
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(l) + ".h", str(l))
+
+ # create binary weight files -- useful for runtime initialization since
+ # HLS might freeze / not work for very large header files
+ # note that it will still be necessary to declare the PE memories in
+ m.createBinFiles(targetDirBin, str(l))
+
# create parameter files for tiny-cnn
-
- config+="#endif //__LAYER_CONFIG_H_\n"
-
- configFile = open(targetDirHLS+"/config.h", "w")
- configFile.write(config)
- configFile.close()
+ config+="#endif //__LAYER_CONFIG_H_\n"
+ configFile = open(targetDirHLS+"/config.h", "w")
+ configFile.write(config)
+ configFile.close()
# return HW config string as C #define's for a Conv layer
def printConvDefines(prefix, kernelDim, ifm_ch, ifm_dim, ofm_ch, ofm_dim, simd, pe, wmem, tmem, wpi, api, wpf, apf):
- #network topology
- config = ""
- numb_ops = 2*ifm_ch*ofm_ch*kernelDim*kernelDim*ofm_dim*ofm_dim # 2* because of MAC
- est_latency = numb_ops/(2*simd*pe)
- config += "/**\n * Convolutional Layer %s:\n * IFM = %5d IFM_CH = %5d\n * OFM = %5d OFM_CH = %5d\n * SIMD = %5d PE = %5d\n * WMEM = %5d TMEM = %5d\n * #Ops = %5d Ext Latency = %5d\n**/\n" % (prefix, ifm_dim, ifm_ch, ofm_dim, ofm_ch, simd, pe, wmem, tmem, numb_ops, est_latency)
- config += "\n" + "#define %s_K %d" % (prefix, kernelDim)
- config += "\n" + "#define %s_IFM_CH %d" % (prefix, ifm_ch)
- config += "\n" + "#define %s_IFM_DIM %d" % (prefix, ifm_dim)
- config += "\n" + "#define %s_OFM_CH %d" % (prefix, ofm_ch)
- config += "\n" + "#define %s_OFM_DIM %d" % (prefix, ofm_dim)
- #network configuration
- config += "\n" + "#define %s_SIMD %d" % (prefix, simd)
- config += "\n" + "#define %s_PE %d" % (prefix, pe)
- config += "\n" + "#define %s_WMEM %d" % (prefix, wmem)
- config += "\n" + "#define %s_TMEM %d" % (prefix, tmem)
- #precision used
- config += "\n" + "#define %s_WPI %d" % (prefix, wpi)
- config += "\n" + "#define %s_API %d" % (prefix, api)
- config += "\n" + "#define %s_WPF %d" % (prefix, wpf)
- config += "\n" + "#define %s_APF %d\n" % (prefix, apf)
- return config
+ #network topology
+ config = ""
+ numb_ops = 2*ifm_ch*ofm_ch*kernelDim*kernelDim*ofm_dim*ofm_dim # 2* because of MAC
+ est_latency = numb_ops/(2*simd*pe)
+ config += "/**\n * Convolutional Layer %s:\n * IFM = %5d IFM_CH = %5d\n * OFM = %5d OFM_CH = %5d\n * SIMD = %5d PE = %5d\n * WMEM = %5d TMEM = %5d\n * #Ops = %5d Ext Latency = %5d\n**/\n" % (prefix, ifm_dim, ifm_ch, ofm_dim, ofm_ch, simd, pe, wmem, tmem, numb_ops, est_latency)
+ config += "\n" + "#define %s_K %d" % (prefix, kernelDim)
+ config += "\n" + "#define %s_IFM_CH %d" % (prefix, ifm_ch)
+ config += "\n" + "#define %s_IFM_DIM %d" % (prefix, ifm_dim)
+ config += "\n" + "#define %s_OFM_CH %d" % (prefix, ofm_ch)
+ config += "\n" + "#define %s_OFM_DIM %d" % (prefix, ofm_dim)
+ #network configuration
+ config += "\n" + "#define %s_SIMD %d" % (prefix, simd)
+ config += "\n" + "#define %s_PE %d" % (prefix, pe)
+ config += "\n" + "#define %s_WMEM %d" % (prefix, wmem)
+ config += "\n" + "#define %s_TMEM %d" % (prefix, tmem)
+ #precision used
+ config += "\n" + "#define %s_WPI %d" % (prefix, wpi)
+ config += "\n" + "#define %s_API %d" % (prefix, api)
+ config += "\n" + "#define %s_WPF %d" % (prefix, wpf)
+ config += "\n" + "#define %s_APF %d\n" % (prefix, apf)
+ return config
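+
+# note: est_latency above assumes one MAC per PE/SIMD lane per cycle, so the
+# array retires simd*pe MACs (= 2*simd*pe ops) every cycle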
# return HW config string as C #define's for a FC layer
def printFCDefines(prefix, simd, pe, wmem, tmem, mw, mh, wpi, api, wpf, apf):
- config = ""
- numb_ops = 2*mw*mh # 2* because of MAC
- est_latency = numb_ops/(2*simd*pe)
- config += "/**\n * Fully-Connected Layer %s:\n * MatW = %5d MatH = %5d\n * SIMD = %5d PE = %5d\n * WMEM = %5d TMEM = %5d\n * #Ops = %5d Ext Latency = %5d\n**/\n" % (prefix, mw, mh, simd, pe, wmem, tmem, numb_ops, est_latency)
- config += "\n" + "#define %s_SIMD %d" % (prefix, simd)
- config += "\n" + "#define %s_PE %d" % (prefix, pe)
- config += "\n" + "#define %s_WMEM %d" % (prefix, wmem)
- config += "\n" + "#define %s_TMEM %d" % (prefix, tmem)
- config += "\n" + "#define %s_MW %d" % (prefix, mw)
- config += "\n" + "#define %s_MH %d" % (prefix, mh)
- config += "\n" + "#define %s_WPI %d" % (prefix, wpi)
- config += "\n" + "#define %s_API %d" % (prefix, api)
- config += "\n" + "#define %s_WPF %d" % (prefix, wpf)
- config += "\n" + "#define %s_APF %d\n" % (prefix, apf)
- return config
-
+ config = ""
+ numb_ops = 2*mw*mh # 2* because of MAC
+ est_latency = numb_ops/(2*simd*pe)
+ config += "/**\n * Fully-Connected Layer %s:\n * MatW = %5d MatH = %5d\n * SIMD = %5d PE = %5d\n * WMEM = %5d TMEM = %5d\n * #Ops = %5d Ext Latency = %5d\n**/\n" % (prefix, mw, mh, simd, pe, wmem, tmem, numb_ops, est_latency)
+ config += "\n" + "#define %s_SIMD %d" % (prefix, simd)
+ config += "\n" + "#define %s_PE %d" % (prefix, pe)
+ config += "\n" + "#define %s_WMEM %d" % (prefix, wmem)
+ config += "\n" + "#define %s_TMEM %d" % (prefix, tmem)
+ config += "\n" + "#define %s_MW %d" % (prefix, mw)
+ config += "\n" + "#define %s_MH %d" % (prefix, mh)
+ config += "\n" + "#define %s_WPI %d" % (prefix, wpi)
+ config += "\n" + "#define %s_API %d" % (prefix, api)
+ config += "\n" + "#define %s_WPF %d" % (prefix, wpf)
+ config += "\n" + "#define %s_APF %d\n" % (prefix, apf)
+ return config
+
# round val up to the nearest multiple of pad
def padTo(val, pad):
- rem = val % pad
- return val if rem == 0 else (val + pad - rem)
+ rem = val % pad
+ return val if rem == 0 else (val + pad - rem)
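+# e.g. padTo(100, 64) -> 128, while padTo(576, 64) -> 576 (already aligned)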
+
# the quantization function
def quantize(x, integer, fract):
bits=integer+fract
if (bits==1):
- return(binarize(x))
+ return(binarize(x))
n = float(2**fract) # GIULIO ADD CLIP
return np.floor(x * n + 0.5) / n
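+# e.g. quantize(0.37, integer=0, fract=2) = floor(0.37*4 + 0.5)/4 = 0.25,
+# i.e. round-to-nearest on a grid of 2**-fract steps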
+
# the binarization function, basically the sign encoded as 1 for positive and
# 0 for negative
def binarize(w):
- return 1 if w >=0 else 0
-
-
-# convert a fully connected binarized layer plus batch normalization into
-# the simplified form (binary weight and positive threshold)
-# note that the neurons are assumed to be in the columns of the weight
-# matrix
-def makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_int=1, WPrecisions_fract=0, use_rowmajor=False, usePopCount=True):
- ins = weights.shape[0]
- outs = weights.shape[1]
- print "Extracting FCBN complex, ins = %d outs = %d" % (ins, outs)
- # we'll fill in the binarized weights and thresholds iteratively
- w_bin = range(ins*outs)
- thresholds = range(outs)
- for neuron in range(outs):
- # compute a preliminary threshold from the batchnorm parameters
- thres = mean[neuron] - bias[neuron] - (beta[neuron] / (gamma[neuron]*invstd[neuron]))
- need_flip = 0
- # ensure all neurons activate on the "positive" side, so we can use
- # greater-than-threshold activation
- if gamma[neuron]*invstd[neuron] < 0:
- need_flip = 1
- thres = -thres
- # turn threshold into "number of 1s" (popcount) instead of signed sum
- if usePopCount:
- thresholds[neuron] = int((ins + thres) / 2)
- else:
- thresholds[neuron] = int(thres)
- # binarize the synapses
- for synapse in range(ins):
- # note how we change from col major to row major if requested
- dest_ind = neuron*ins+synapse if use_rowmajor else synapse*outs+neuron
- if need_flip:
- w_bin[dest_ind] = quantize(-weights[synapse][neuron],WPrecisions_int,WPrecisions_fract)
- else:
- w_bin[dest_ind] = quantize(weights[synapse][neuron],WPrecisions_int,WPrecisions_fract)
- # reshape the output as desired
- if use_rowmajor:
- w_bin = np.asarray(w_bin).reshape((outs, ins))
- else:
- w_bin = np.asarray(w_bin).reshape((ins, outs))
-
- return (w_bin, thresholds)
+ return np.where(w < 0, 0, 1)
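+# e.g. binarize(np.array([-0.3, 0.0, 2.1])) -> array([0, 1, 1]); zero is
+# treated as positive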
# convert a fully connected layer plus batch normalization into
# the simplified form (quantized weight and multiple thresholds)
-# note that the neurons are assumed to be in the columns of the weight
-# matrix
-def makeFCBNComplex_QNN(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, WPrecisions_int, APrecisions_int, use_rowmajor=False, numThresBits=16, numThresIntBits=None):
- ins = weights.shape[0]
- outs = weights.shape[1]
- APrecision = APrecisions_fract + APrecisions_int
- print "Extracting FCBN complex, ins = %d outs = %d" % (ins, outs)
- # we'll fill in the weights and thresholds iteratively
- w_bin = range(ins*outs)
- thresholds = range(outs)
- #tep = np.linspace(-1,1,num=2**APrecision-1,endpoint=False) # Equidistant points between -1 and +1 (hardtanh)
- #step = step[1:] # Removing the -1 point for symmetrical quantization - hardtanh
- step = np.linspace(-1,1,num=2**(APrecision-1),endpoint=False) + 1./(2**(APrecisions_fract+1)) # This one make -0.5 and +0.5 with 2 bits
-
- if (numThresIntBits) is None:
- factor = 1
- else:
- factor = 2**(numThresBits - numThresIntBits)
-
- for neuron in range(outs):
- need_flip = 0
+# note that the neurons are assumed to be in the columns of the weight matrix
+def makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, \
+ WPrecisions_int, APrecisions_int, usePopCount=False, use_rowmajor=True, numThresBits=16, numThresIntBits=None):
+
+ ins = weights.shape[0]
+ outs = weights.shape[1]
+ APrecision = APrecisions_fract + APrecisions_int
+ print("Extracting FCBN complex, ins = %d outs = %d" % (ins, outs))
+    # compute preliminary thresholds from the batchnorm parameters
+ if (APrecision == 1):
+ step = np.zeros(1, dtype=np.float64)
+ else:
+        # This one makes -0.5 and +0.5 with 2 bits
+ step = np.linspace(-1,1,num=2**(APrecision-1), endpoint=False, dtype=np.float64) + 1./(2**(APrecisions_fract+1))
+ # step = np.linspace(-1,1,num=2**APrecision-1,endpoint=False) # Equidistant points between -1 and +1 (hardtanh)
+ # step = step[1:] # Removing the -1 point for symmetrical quantization - hardtanh
+ thresholds = np.zeros((len(step),len(mean)), dtype=np.float64)
+ for i in range(len(step)):
+ thresholds[i] = (mean - bias) + ((step[i] - beta) / (gamma*invstd))
# ensure all neurons activate on the "positive" side, so we can use
- thres = mean[neuron] - bias[neuron] + ((step - beta[neuron]) / (gamma[neuron]*invstd[neuron]))
# greater-than-threshold activation
- if gamma[neuron]*invstd[neuron] < 0:
- need_flip = 1
- thres = np.ceil(-factor*thres)
- else:
- thres = np.floor(factor*thres)
+ need_flip = np.sign(gamma)
+ factor = need_flip if numThresIntBits is None else need_flip * 2**(numThresBits - numThresIntBits)
+ thresholds = factor*thresholds
+ # turn threshold into "number of 1s" (popcount) instead of signed sum
+ if usePopCount:
+ thresholds = (ins + thresholds)/2
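+        # with +/-1 weights a signed sum s over ins inputs satisfies
+        # s = 2*p - ins for popcount p, so s >= T becomes p >= (ins + T)/2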
# Integer-like threshold
- # compute a preliminary threshold from the batchnorm parameters
- #thres = dtype=int64
- thresholds[neuron] = thres.astype(int)
- # binarize the synapses00
- for synapse in range(ins):
- # note how we change from col major to row major if requested
- dest_ind = neuron*ins+synapse if use_rowmajor else synapse*outs+neuron
- if need_flip:
- w_bin[dest_ind] = quantize(-weights[synapse][neuron],WPrecisions_int,WPrecisions_fract)
- else:
- w_bin[dest_ind] = quantize(weights[synapse][neuron],WPrecisions_int,WPrecisions_fract)
- # reshape the output as desired
- if use_rowmajor:
- w_bin = np.asarray(w_bin).reshape((outs, ins))
- else:
- w_bin = np.asarray(w_bin).reshape((ins, outs))
- return (w_bin, thresholds)
+ else:
+ thresholds = np.where(need_flip < 0, np.ceil(thresholds), np.floor(thresholds))
+    thresholds = thresholds.transpose(1,0).astype(int)
+
+ # generating weights
+ weights = weights * need_flip
+ weights = quantize(weights, WPrecisions_int, WPrecisions_fract)
+ # note how we change from col major to row major if requested
+ if use_rowmajor:
+ weights = weights.transpose(1,0)
+ return (weights, thresholds)
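+
+# note on the threshold formula above: for a layer output y = Wx + bias
+# followed by batchnorm BN(y) = gamma*invstd*(y - mean) + beta, solving
+# BN(y) = step[i] for y gives y = mean + (step[i] - beta)/(gamma*invstd);
+# subtracting the bias yields the threshold on Wx used in the loop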
# binarize and pack convolutional layer weights into a matrix and compute
# thresholds from the conv bias and batchnorm parameters
-def makeConvBNComplex(weights, bias, beta, gamma, mean, invstd, interleaveChannels, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int,usePopCount=True,numThresBits=16, numThresIntBits=None):
- WPrecision = WPrecisions_fract + WPrecisions_int
- APrecision = APrecisions_fract + APrecisions_int
- IPrecision = IPrecisions_fract + IPrecisions_int
- numOut = weights.shape[0]
- numIn = weights.shape[1]
- k = weights.shape[2]
- if(k != weights.shape[3]):
- raise "Nonsymmetric conv kernels are not yet supported"
- print "Extracting conv-BN complex, OFM=%d IFM=%d k=%d" % (numOut, numIn, k)
- # the fanin is used to ensure positive-only threshold
- fanin = numIn * k * k
- w_bin = range(numOut * numIn * k * k)
- # one threshold per output channel
- thresholds = range(numOut)
- dest_ind = 0
- step = np.linspace(-1,1,num=2**(APrecision-1),endpoint=False) + 1./(2**(APrecisions_fract+1)) # This one make -0.5 and +0.5 with 2 bits
- # we'll fill in the binarized weights and thresholds iteratively
- for neuron in range(numOut):
+def makeConvBNComplex(weights, bias, beta, gamma, mean, invstd, interleaveChannels, \
+ WPrecisions_fract, APrecisions_fract, IPrecisions_fract, \
+ WPrecisions_int, APrecisions_int, IPrecisions_int, usePopCount=True, numThresBits=16, numThresIntBits=None):
+
+ WPrecision = WPrecisions_fract + WPrecisions_int
+ APrecision = APrecisions_fract + APrecisions_int
+ IPrecision = IPrecisions_fract + IPrecisions_int
+ numOut = weights.shape[0]
+ numIn = weights.shape[1]
+ k = weights.shape[2]
+ # the fanin is used to ensure positive-only threshold
+ fanin = numIn * k * k
+ if(k != weights.shape[3]):
+ raise Exception("Nonsymmetric conv kernels are not yet supported")
+ print("Extracting conv-BN complex, OFM=%d IFM=%d k=%d" % (numOut, numIn, k))
+
# compute a preliminary threshold from the batchnorm parameters,
# subtracting the conv bias from the batchnorm mean
- thres = (mean[neuron] - bias[neuron]) - (beta[neuron] / (gamma[neuron]*invstd[neuron]))
- need_flip = 0
-
- if (numThresIntBits) is None:
- factor = 1
+ if (APrecision == 1):
+ step = np.zeros(1, dtype=np.float64)
else:
- factor = 2**(numThresBits - numThresIntBits)
-
+        # This one makes -0.5 and +0.5 with 2 bits
+ step = np.linspace(-1,1,num=2**(APrecision-1), endpoint=False, dtype=np.float64) + 1./(2**(APrecisions_fract+1))
+ # step = np.linspace(-1,1,num=2**APrecision-1,endpoint=False) # Equidistant points between -1 and +1 (hardtanh)
+ # step = step[1:] # Removing the -1 point for symmetrical quantization - hardtanh
+ thresholds = np.zeros((len(step),len(mean)), dtype=np.float64)
+ for i in range(len(step)):
+ thresholds[i] = (mean - bias) + ((step[i] - beta) / (gamma*invstd))
# ensure all neurons activate on the "positive" side, so we can use
# greater-than-threshold activation
- if (APrecision==1):
- if gamma[neuron]*invstd[neuron] < 0:
- need_flip = 1
- thres = np.ceil(-factor*thres)
- else:
- thres = np.floor(factor*thres)
- # turn threshold into "number of 1s" (popcount) instead of signed sum
- if usePopCount:
- thresholds[neuron] = int((fanin + thres) / 2)
- else:
- thresholds[neuron] = thres
- else:
- if gamma[neuron]*invstd[neuron] < 0:
- need_flip = 1
- thres = mean[neuron] - bias[neuron] + ((step - beta[neuron]) / (gamma[neuron]*invstd[neuron]))
- thres = np.ceil(-factor*thres)
- else:
- thres = mean[neuron] - bias[neuron] + ((step - beta[neuron]) / (gamma[neuron]*invstd[neuron]))
- thres = np.floor(factor*thres)
- thresholds[neuron] = thres#thres.astype(int)
- # go through each weight of each convolutional kernel
+ need_flip = np.sign(gamma)
+ factor = need_flip if numThresIntBits is None else need_flip * 2**(numThresBits - numThresIntBits)
+ thresholds = factor*thresholds
+ thresholds = np.where(need_flip < 0, np.ceil(thresholds), np.floor(thresholds))
+ # turn threshold into "number of 1s" (popcount) instead of signed sum
+ if usePopCount and APrecision==1:
+ thresholds = (fanin + thresholds)/2
+    thresholds = thresholds.transpose(1,0).astype(int)
+
+ # generating weights
+ weights = weights * need_flip.reshape(-1,1,1,1)
+ weights = quantize(weights, WPrecisions_int, WPrecisions_fract)
if interleaveChannels:
- for ky in range(k):
- for kx in range(k):
- for ifm in range(numIn):
- f = -1 if need_flip else +1
- w_bin[dest_ind] = quantize(f*weights[neuron][ifm][ky][kx],WPrecisions_int,WPrecisions_fract) # If use quantize should work in both cases
- dest_ind += 1
- else:
- for ifm in range(numIn):
- for ky in range(k):
- for kx in range(k):
- f = -1 if need_flip else +1
- w_bin[dest_ind] = quantize(f*weights[neuron][ifm][ky][kx],WPrecisions_int,WPrecisions_fract)
- dest_ind += 1
-
- # reshape the output as desired
- w_bin = np.asarray(w_bin).reshape((numOut, fanin))
- return (w_bin, thresholds)
-
+ weights = np.moveaxis(weights, 1, -1)
+ weights = weights.reshape((numOut, fanin))
+ return (weights, thresholds)
# pull out data from a numpy archive containing layer parameters
# this should ideally be done using Lasagne, but this is simpler and works
class BNNWeightReader:
- def __init__(self, paramFile, interleaveChannels):
- self.paramDict = np.load(paramFile)
- self.currentParamInd = 0
- self.interleaveChannels = interleaveChannels
- self.numInterleaveChannels = 0
-
- def __getCurrent(self):
- ret = self.paramDict["arr_" + str(self.currentParamInd)]
- self.currentParamInd += 1
- return ret
-
- def readWeightsRaw(self):
- w = self.__getCurrent()
- return w
-
- def readBatchNormLayerRaw(self):
- bias = self.__getCurrent()
- beta = self.__getCurrent()
- gamma = self.__getCurrent()
- mean = self.__getCurrent()
- invstd = self.__getCurrent()
- return (bias, beta, gamma, mean, invstd)
-
- # read a fully connected layer plus batchnorm, binarize and convert to
- # positive threshold form, returning (bin weight matrix, thresholds)
- # the returned bin weight matrix has neurons along rows and is suitable
- # to be packed into BNN mems using BNNProcElemMem
- def readFCBNComplex(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int, numThresBits=16, numThresIntBits=None):
- WPrecision = WPrecisions_fract + WPrecisions_int
- APrecision = APrecisions_fract + APrecisions_int
- IPrecision = IPrecisions_fract + IPrecisions_int
- weights = self.readWeightsRaw()
- (bias, beta, gamma, mean, invstd) = self.readBatchNormLayerRaw()
+ def __init__(self, paramFile, interleaveChannels):
+ self.paramDict = np.load(paramFile)
+ self.currentParamInd = 0
+ self.interleaveChannels = interleaveChannels
+ self.numInterleaveChannels = 0
- if WPrecision==1 and APrecision==1 and IPrecision==1:
- (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_int, WPrecisions_fract, use_rowmajor=True)
- elif (APrecision==1):
- (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_int, WPrecisions_fract, use_rowmajor=True, usePopCount=False)
- else:
- (Wb, T) = makeFCBNComplex_QNN(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, WPrecisions_int, APrecisions_int, True, numThresBits, numThresIntBits)
- # if the interleave flag is set, permute elements in each row
- if self.interleaveChannels and self.numInterleaveChannels != 0:
- print "Interleaving %d channels in fully connected layer..." % self.numInterleaveChannels
- pixPerChan = Wb.shape[1] / self.numInterleaveChannels
- if (APrecisions_fract == 0):
- Wb_perm = np.zeros(Wb.shape, dtype=np.int)
- else:
- Wb_perm = np.zeros(Wb.shape, dtype=np.float)
- for r in range(Wb.shape[0]):
- for chan in range(self.numInterleaveChannels):
- for cpix in range(pixPerChan):
- Wb_perm[r][cpix*self.numInterleaveChannels + chan] = Wb[r][chan*pixPerChan + cpix]
- Wb = Wb_perm
- # set interleave to zero once we go past this fc layer
- self.numInterleaveChannels = 0
-
- return (Wb, T)
+ def __getCurrent(self):
+ ret = self.paramDict["arr_" + str(self.currentParamInd)]
+ self.currentParamInd += 1
+ return ret.astype(np.float64)
+
+ def readWeightsRaw(self):
+ w = self.__getCurrent()
+ return w
+
+ def readBatchNormLayerRaw(self, read_bias=True):
+ bias = self.__getCurrent() if read_bias else None
+ beta = self.__getCurrent()
+ gamma = self.__getCurrent()
+ mean = self.__getCurrent()
+ invstd = self.__getCurrent()
+ return (bias, beta, gamma, mean, invstd)
+
+ # read a fully connected layer plus batchnorm, binarize and convert to
+ # positive threshold form, returning (bin weight matrix, thresholds)
+ # the returned bin weight matrix has neurons along rows and is suitable
+ # to be packed into BNN mems using BNNProcElemMem
+ def readFCBNComplex(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, \
+ WPrecisions_int, APrecisions_int, IPrecisions_int, numThresBits=16, numThresIntBits=None):
+
+ WPrecision = WPrecisions_fract + WPrecisions_int
+ APrecision = APrecisions_fract + APrecisions_int
+ IPrecision = IPrecisions_fract + IPrecisions_int
+ weights = self.readWeightsRaw()
+ (bias, beta, gamma, mean, invstd) = self.readBatchNormLayerRaw()
+
+ if (WPrecision == 1) and (APrecision == 1) and (IPrecision == 1):
+ (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, \
+ WPrecisions_int, APrecisions_int, usePopCount=True)
+ else:
+ (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, \
+ WPrecisions_int, APrecisions_int, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+
+ # if the interleave flag is set, permute elements in each row
+ if self.interleaveChannels and self.numInterleaveChannels != 0:
+ print("Interleaving %d channels in fully connected layer..." % self.numInterleaveChannels)
+ Wb = Wb.reshape(Wb.shape[0], self.numInterleaveChannels, -1)
+ Wb = Wb.swapaxes(1,-1).reshape(Wb.shape[0], -1)
+ # set interleave to zero once we go past this fc layer
+ self.numInterleaveChannels = 0
+ return (Wb, T)
# read a fully connected layer without batchnorm and without using thresholds,
# returning bin weight matrix
# the returned bin weight matrix has neurons along rows and is suitable
# to be packed into BNN mems using BNNProcElemMem
- def readFCBNComplex_no_thresholds(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int, numThresBits=16, numThresIntBits=None):
- WPrecision = WPrecisions_fract + WPrecisions_int
- APrecision = APrecisions_fract + APrecisions_int
- IPrecision = IPrecisions_fract + IPrecisions_int
-
- weights = self.readWeightsRaw()
-
- #fake the batchnorm params to use same make functions below
- bias = np.zeros(weights.shape[1])
- beta = np.zeros(weights.shape[1])
- gamma = np.ones(weights.shape[1])
- mean = np.ones(weights.shape[1])
- invstd = np.ones(weights.shape[1])
-
- if (WPrecision == 1) and (APrecision == 1) and (IPrecision == 1):
- (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_int, WPrecisions_fract, use_rowmajor=True)
- elif (APrecision==1):
- (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_int, WPrecisions_fract, use_rowmajor=True, usePopCount=False)
- else:
- (Wb, T) = makeFCBNComplex_QNN(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, WPrecisions_int, APrecisions_int, True, numThresBits, numThresIntBits)
-
- # if the interleave flag is set, permute elements in each row
- if self.interleaveChannels and self.numInterleaveChannels != 0:
- print ("Interleaving %d channels in fully connected layer..." % self.numInterleaveChannels)
- pixPerChan = Wb.shape[1] / self.numInterleaveChannels
- if (APrecisions_fract == 0):
- Wb_perm = np.zeros(Wb.shape, dtype=np.int)
+ def readFCBNComplex_no_thresholds(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, \
+ WPrecisions_int, APrecisions_int, IPrecisions_int, numThresBits=16, numThresIntBits=None):
+
+ WPrecision = WPrecisions_fract + WPrecisions_int
+ APrecision = APrecisions_fract + APrecisions_int
+ IPrecision = IPrecisions_fract + IPrecisions_int
+
+ weights = self.readWeightsRaw()
+ (_, _, gamma, _, _) = self.readBatchNormLayerRaw(read_bias=False)
+
+ #fake the batchnorm params to use same make functions below
+ bias = np.zeros(weights.shape[1])
+ beta = np.zeros(weights.shape[1])
+        # read gamma: if it has a negative sign, we have to invert the weights
+ gamma = gamma*np.ones(weights.shape[1])
+ mean = np.ones(weights.shape[1])
+ invstd = np.ones(weights.shape[1])
+
+ if (WPrecision == 1) and (APrecision == 1) and (IPrecision == 1):
+ (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, \
+ WPrecisions_int, APrecisions_int, usePopCount=True)
else:
- Wb_perm = np.zeros(Wb.shape, dtype=np.float)
- for r in range(Wb.shape[0]):
- for chan in range(self.numInterleaveChannels):
- for cpix in range(pixPerChan):
- Wb_perm[r][cpix*self.numInterleaveChannels + chan] = Wb[r][chan*pixPerChan + cpix]
- Wb = Wb_perm
- # set interleave to zero once we go past this fc layer
- self.numInterleaveChannels = 0
-
- return (Wb, T)
-
- # read a convolutional layer plus batchnorm, binarize and convert to
- # positive threshold form, returning (bin weight matrix, thresholds)
- # the returned bin weight matrix is suitable to be packed into BNN mems
- def readConvBNComplex(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int, usePopCount=True,numThresBits=16, numThresIntBits=None):
- weights = self.readWeightsRaw()
- (bias, beta, gamma, mean, invstd) = self.readBatchNormLayerRaw()
- # keep track of output channels for use in FC layer interleave
- self.numInterleaveChannels = weights.shape[0]
- (Wb, T) = makeConvBNComplex(weights, bias, beta, gamma, mean, invstd, self.interleaveChannels, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int, usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
- return (Wb, T)
+ (Wb, T) = makeFCBNComplex(weights, bias, beta, gamma, mean, invstd, WPrecisions_fract, APrecisions_fract, \
+ WPrecisions_int, APrecisions_int, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+
+ # if the interleave flag is set, permute elements in each row
+ if self.interleaveChannels and self.numInterleaveChannels != 0:
+ print("Interleaving %d channels in fully connected layer..." % self.numInterleaveChannels)
+ Wb = Wb.reshape(Wb.shape[0], self.numInterleaveChannels, -1)
+ Wb = Wb.swapaxes(1,-1).reshape(Wb.shape[0], -1)
+ # set interleave to zero once we go past this fc layer
+ self.numInterleaveChannels = 0
+ return (Wb, T)
+
+ # read a convolutional layer plus batchnorm, binarize and convert to
+ # positive threshold form, returning (bin weight matrix, thresholds)
+ # the returned bin weight matrix is suitable to be packed into BNN mems
+ def readConvBNComplex(self, WPrecisions_fract, APrecisions_fract, IPrecisions_fract, \
+            WPrecisions_int, APrecisions_int, IPrecisions_int, usePopCount=True, numThresBits=16, numThresIntBits=None):
+
+ weights = self.readWeightsRaw()
+ (bias, beta, gamma, mean, invstd) = self.readBatchNormLayerRaw()
+ # keep track of output channels for use in FC layer interleave
+ self.numInterleaveChannels = weights.shape[0]
+ (Wb, T) = makeConvBNComplex(weights, bias, beta, gamma, mean, invstd, self.interleaveChannels, \
+ WPrecisions_fract, APrecisions_fract, IPrecisions_fract, WPrecisions_int, APrecisions_int, IPrecisions_int, \
+ usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
+ return (Wb, T)
# create a 2D array of zeroes for the PE memories
def makeEmptyPEMems(numPE, memDepth, initVal):
- ret = []
-
- for i in range(numPE):
- ret += [ [initVal for i in range(memDepth)] ]
- return ret
+ ret = np.full((numPE, memDepth), initVal)
+ return ret
# ensure no non-binary weight values while packing
def ensureBinary(x):
- for i in x:
- if i != 0 and i != 1:
- raise "Non-binary values found in BNN weight data"
-
-# turn a binary array into a string representation with MSB on the left
-# e.g. [A, B, C, D] becomes "DCBA"
-def binArrayToString(x):
- ensureBinary(x)
- return reduce(lambda x,y: str(x)+str(y), np.flipud(x), "")
+    if not np.all((x == 0) | (x == 1)):
+        raise Exception("Non-binary values found in BNN weight data")
# Encode the array as a single integer number
 # The array contains all the values that have to be encoded
# in a single ap_uint.
-def ArrayToString(array, precision, precFract=0, debug=False):
- val = 0
- #for i in range(len(array)-1, -1, -1):
- for i in range(len(array)):
- tmp = array[i]
- tmp2 = tmp * (2**precFract)
-
- if tmp < 0:
- tmp2 = 2**(precision) + (tmp * (2**precFract))
-
- tmp2 = int(tmp2)
- tmp3 = tmp2 * 2**(precision*i)
- val = val + tmp3
-
- return val
+def ArrayToAp_uints(array, precision, precFract=0):
+ if precision == 1:
+ ensureBinary(array)
+ datatype = np.int64
+ else:
+ array = array * (1 << precFract)
+ array = np.where(array < 0, array+(1 << precision), array).astype(np.uint64)
+ datatype = np.uint64
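+    # element i of the last axis contributes its value shifted left by
+    # precision*i bits, so a dot product with these powers of two packs the
+    # whole SIMD group into a single ap_uint-style integer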
+ factor = 1 << precision*np.arange(array.shape[-1], dtype=datatype)
+ val = array.dot(factor)
+ return val
# pack one or several BNN layers into PE on-chip memories, and create
@@ -487,268 +399,196 @@ def ArrayToString(array, precision, precFract=0, debug=False):
# note that no binarization or quantization is performed
# If numThresIntBits is not none, weights produced will be fixed point numbers.
class BNNProcElemMem:
- def __init__(self, numPE, numSIMD, weightMemDepth, thresMemDepth, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=16, numThresIntBits=None):
- APrecision = APrecision_integer+APrecision_fractional
- WPrecision = WPrecision_integer+WPrecision_fractional
- IPrecision = IPrecision_integer+IPrecision_fractional
- self.lenExtMemWord = 64
- self.numPE = numPE
- self.numSIMD = numSIMD
- self.numThresBits = numThresBits
- self.numThresIntBits = numThresIntBits
- self.APrecisionInt = APrecision_integer
- self.APrecisionFract = APrecision_fractional
- self.APrecision = APrecision
- self.numThresholds = 2**(APrecision - 1)
- self.IPrecision = IPrecision
- self.IPrecisionInt = IPrecision_integer
- self.IPrecisionFract = IPrecision_fractional
- self.WPrecision = WPrecision
- self.WPrecisionInt = WPrecision_integer
- self.WPrecisionFract = WPrecision_fractional
- self.weightMemDepth = weightMemDepth
- self.thresMemDepth = thresMemDepth
- self.weightMemHead = [0 for i in range(numPE)]
- self.thresMemHead = [0 for i in range(numPE)]
- # note that these memories are 2D: [PE index, mem index]
- if self.WPrecision==1:
- self.weightMem = makeEmptyPEMems(self.numPE, self.weightMemDepth, "1" * numSIMD)
- else:
- pad_word = ArrayToString(np.full((self.numSIMD),0), self.WPrecision)
- self.weightMem = makeEmptyPEMems(self.numPE, self.weightMemDepth, pad_word)
- self.thresMem = makeEmptyPEMems(self.numPE, self.thresMemDepth, 0)
- self.AccuOffset = 0
- self.neuronPad = []
- self.synapsePad = []
- self.layerSizes = []
- self.layerHeadsW = []
- self.layerHeadsT = []
-
- def __padMatrix(self, A, T, padW=0, padH=0):
- n = A.shape[0]
- s = A.shape[1]
- # ensure number of rows (neurons) is divisable by PE count
- padN = padH - n #self.numPE - (n % self.numPE) if n % self.numPE != 0 else 0
- # ensure number of cols (synapses per neuron) is divisable by SIMD width
- padS = padW - s #self.numSIMD - (s % self.numSIMD) if s % self.numSIMD != 0 else 0
- # create padded version of matrix
- # use 1 bits to pad matrix, 0 bits to pad input
- if self.WPrecision==1:
- Ap = np.pad(A, ((0, padN), (0, padS)), 'constant', constant_values=1 )
- else:
-# pad_word = ArrayToString(np.full((self.numSIMD),1), self.WPrecision)
- Ap = np.pad(A, ((0, padN), (0, padS)), 'constant', constant_values=0 )
- # pad thresholds
- max_thres = pow(2, self.numThresBits) - 1
- if self.APrecision==1:
- Tp = np.pad(T, ((0, padN)), 'constant', constant_values=max_thres)
- else:
- Tp = np.pad(T, ((0, padN), (0, 0)), 'constant', constant_values=max_thres)
- #Tp = np.pad(T, ((0, padN)), 'constant', constant_values=max_thres)
- # keep track of how much padding we added
- self.neuronPad += [padN]
- self.synapsePad += [padS]
- if (self.WPrecision==1 and self.APrecision==1 and self.IPrecision==1) or (self.WPrecision>=2):
- self.AccuOffset = 0
- else:
- self.AccuOffset = padS
- return (Ap, Tp)
-
- def __updatePEMapping(self, A, T):
- # TODO also update threshold memories
- # should only be called internally, and on a matrix that is already padded
- n = A.shape[0]
- s = A.shape[1]
- if n % self.numPE != 0:
- raise "Matrix height must be multiple of PE count"
- if s % self.numSIMD != 0:
- raise "Matrix width must be multiple of SIMD width"
- if n != T.shape[0]:
- raise "Number of neurons and thresholds do not match"
- # reshape and copy into PE memories
- neuronsPerPE = n / self.numPE
- synGroupsPerNeuron = s / self.numSIMD
- # TODO check that there is enough room in the PE memory
- self.layerHeadsW += [ self.weightMemHead[0] ]
- self.layerHeadsT += [ self.thresMemHead[0] ]
- M=A.reshape((n, synGroupsPerNeuron, self.numSIMD))
- self.layerSizes += [(n,s)]
- for i in range(n):
- # interleave matrix rows between different PEs
- targetPE = i % self.numPE
- targetBase = self.weightMemHead[targetPE]
- self.thresMem[targetPE][self.thresMemHead[targetPE]] = T[i] - self.AccuOffset # We have to add the AccuOffset if padding is not transparent
- for j in range(synGroupsPerNeuron):
- if (self.WPrecision==1):
- self.weightMem[targetPE][targetBase+j] = binArrayToString(M[i][j])
+ def __init__(self, numPE, numSIMD, weightMemDepth, thresMemDepth, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer,\
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=16, numThresIntBits=None):
+
+ APrecision = APrecision_integer+APrecision_fractional
+ WPrecision = WPrecision_integer+WPrecision_fractional
+ IPrecision = IPrecision_integer+IPrecision_fractional
+ self.numPE = numPE
+ self.numSIMD = numSIMD
+ if self.numSIMD*WPrecision > 64:
+ raise Exception("SIMD*(WPrecision_integer+WPrecision_fractional) = {} which needs to be <= 64 for now. ".format(\
+ self.numSIMD*WPrecision) +
+ "If you wish extend the finnthesizer to support this, start with the old one " + \
+ "(from previous commits).")
+
+ self.numThresBits = numThresBits
+ self.numThresIntBits = numThresIntBits
+ self.APrecisionInt = APrecision_integer
+ self.APrecisionFract = APrecision_fractional
+ self.APrecision = APrecision
+ self.numThresholds = 2**(APrecision - 1)
+ self.IPrecision = IPrecision
+ self.IPrecisionInt = IPrecision_integer
+ self.IPrecisionFract = IPrecision_fractional
+ self.WPrecision = WPrecision
+ self.WPrecisionInt = WPrecision_integer
+ self.WPrecisionFract = WPrecision_fractional
+ self.weightMemDepth = weightMemDepth
+ self.thresMemDepth = thresMemDepth
+ # note that these memories are 2D: [PE index, mem index]
+ if self.WPrecision==1:
+ self.weightMem = makeEmptyPEMems(self.numPE, self.weightMemDepth, "1" * numSIMD)
else:
- self.weightMem[targetPE][targetBase+j] = ArrayToString(M[i][j], self.WPrecision)
- # update the memory head pointers for the target PE
- self.weightMemHead[targetPE] += synGroupsPerNeuron
- self.thresMemHead[targetPE] += 1
-
- def addMatrix(self, W, T, padW=0, padH=0):
- # add padding
- if self.numThresIntBits is None:
+ pad_word = ArrayToAp_uints(np.full((self.numSIMD),0), self.WPrecision)
+ self.weightMem = makeEmptyPEMems(self.numPE, self.weightMemDepth, pad_word)
+ self.thresMem = makeEmptyPEMems(self.numPE, self.thresMemDepth, 0)
+ self.AccuOffset = 0
+ self.neuronPad = []
+ self.synapsePad = []
+ self.layerSizes = []
+
+ def __padMatrix(self, A, T, padW=0, padH=0):
+ n = A.shape[0]
+ s = A.shape[1]
+        # pad the number of rows (neurons) up to padH so it is divisible by the PE count
+        padN = padH - n
+        # pad the number of cols (synapses per neuron) up to padW so it is divisible by the SIMD width
+        padS = padW - s
+ # create padded version of matrix
+ # use 1 bits to pad matrix, 0 bits to pad input
+ const = 1 if self.WPrecision==1 else 0
+ Ap = np.pad(A, ((0, padN), (0, padS)), 'constant', constant_values=const)
+ # pad thresholds
+ max_thres = pow(2, self.numThresBits) - 1
+ Tp = np.pad(T, ((0, padN), (0, 0)), 'constant', constant_values=max_thres)
+ if self.APrecision==1:
+ Tp = Tp.reshape(-1,)
+ # keep track of how much padding we added
+ self.neuronPad += [padN]
+ self.synapsePad += [padS]
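+        # non-transparent padding shifts the accumulator result by the number of
+        # padded synapses; remember it so the thresholds can be corrected later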
+ if (self.WPrecision==1 and self.APrecision==1 and self.IPrecision==1) or (self.WPrecision>=2):
+ self.AccuOffset = 0
+ else:
+ self.AccuOffset = padS
+ return (Ap, Tp)
+
+ def __updatePEMapping(self, A, T):
+        # should only be called internally, and on a matrix that is already padded
+ n = A.shape[0]
+ s = A.shape[1]
+ if n % self.numPE != 0:
+ raise Exception("Matrix height must be multiple of PE count")
+ if s % self.numSIMD != 0:
+ raise Exception("Matrix width must be multiple of SIMD width")
+ if n != T.shape[0]:
+ raise Exception("Number of neurons and thresholds do not match")
+ # reshape and copy into PE memories
+ neuronsPerPE = n // self.numPE
+ synGroupsPerNeuron = s // self.numSIMD
+
+ M = A.reshape((n, synGroupsPerNeuron, self.numSIMD))
+ self.layerSizes += [(n,s)]
+
+ M = ArrayToAp_uints(M, self.WPrecision, self.WPrecisionFract)
+
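+        # interleave neurons across PEs: matrix row i goes to PE (i % numPE), with
+        # each neuron's SIMD groups stored consecutively in that PE's memory, i.e.
+        # the same layout the old per-element loop produced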
+ tempw = np.split(M, neuronsPerPE, axis=0)
+ tempw = np.asarray(tempw)
+ tempw = np.split(tempw, synGroupsPerNeuron, axis=-1)
+ tempw = np.asarray(tempw).swapaxes(0,2)
+ tempw = tempw.reshape(tempw.shape[0], -1)
+
+        T = T - self.AccuOffset # compensate the thresholds for the AccuOffset when padding is not transparent
+ tempt = np.split(T, neuronsPerPE, axis=0)
+ tempt = np.array(tempt)
+ tempt = tempt.swapaxes(0,1)
+
+ self.weightMem = tempw
+ self.thresMem = tempt
+
+ if self.numThresIntBits is None:
+ # do saturation
+ saturate_max = (2**(self.numThresBits-1))-1
+ saturate_min = -(2**(self.numThresBits-1))
+ self.thresMem = np.clip(self.thresMem, saturate_min, saturate_max)
+
+ def addMatrix(self, W, T, padW=0, padH=0):
+ # add padding
+ if self.numThresIntBits is not None:
+            T = T.astype(np.int64)/(2.**(self.numThresBits - self.numThresIntBits))
(Wp, Tp) = self.__padMatrix(W, T, padW, padH)
- else: # Convert thresholds to ints before updating the PE mapping.
- #Ti = map(lambda x: int(x*2**(self.numThresBits - self.numThresIntBits))/(2.**(self.numThresBits - self.numThresIntBits)), T)
- if (self.APrecision==1):
- Ti = map(lambda x: int(x*2**(self.numThresBits - self.numThresIntBits))/(2.**(self.numThresBits - self.numThresIntBits)), T)
+ # map to PE memories
+ self.__updatePEMapping(Wp, Tp)
+
+ def __makeHLSInit(self, x):
+ if x == 0:
+ return "0x0"
else:
- round_func = lambda x: int(x)/(2.**(self.numThresBits - self.numThresIntBits))
- Ti = map(np.vectorize(round_func), T)
- (Wp, Tp) = self.__padMatrix(W, Ti, padW, padH)
- # map to PE memories
- self.__updatePEMapping(Wp, Tp)
-
- def __makeHLSInit(self, x):
- if x == 0:
- return "0"
- else:
- if (self.WPrecision ==1):
- return hex(int(x, 2))
- else:
- return hex(int(x))
- # pack every word of the internal memory into a 64-bit integer and write
- # into a binary file
- # TODo support sizes > 64 bits.
- def __wmem2bin(self, mem, fileName, isBinaryString):
- import struct
- import bitstring
- lenExtMemWord=64
- outFile = open(fileName, "wb")
- for memInd in range(len(mem)):
- if isBinaryString:
- if len(mem[memInd]) > lenExtMemWord:
- raise "SIMD width needs to suit into 64 bit ExtMemWord for now"
- #pad to machine word
- if len(mem[memInd]) < lenExtMemWord:
- mem[memInd] = ('0' * (lenExtMemWord-len(mem[memInd]))) + mem[memInd]
- for b in range(lenExtMemWord/8) :
- subByte = mem[memInd][lenExtMemWord-(b+1)*8:lenExtMemWord-b*8]
- outFile.write(struct.pack("B",int(subByte,2)))
- else:
- if(len(bin(mem[memInd])) - 2 > lenExtMemWord):
- raise "Error weight exceeding width of ExtMemWord"
- if mem[memInd] >= 0:
- weightBin = bitstring.Bits(uint=mem[memInd], length=lenExtMemWord).unpack("bin:"+str(lenExtMemWord))[0]
+ return hex(np.uint64(x))
+
+ # pack every word of the internal memory into a 64-bit integer and write
+ # into a binary file
+ def __wmem2bin(self, mem, fileName):
+ mem.astype(np.uint64).tofile(fileName)
+
+ def __tmem2bin(self, mem, fileName):
+ if self.numThresIntBits is None:
+            if not np.array_equal(mem.astype(np.int64), mem):
+                print("WARNING: Cannot pack non-int values into binary threshold file.")
+                print("The thresholds might be processed with the wrong datatype. Check the BNNProcElemMem "
+                      "arguments numThresBits and numThresIntBits to ensure a correct fractional shift.")
else:
- weightBin = bitstring.Bits(int=mem[memInd], length=lenExtMemWord).unpack("bin:"+str(lenExtMemWord))[0]
- #write byte by byte
- for b in range(lenExtMemWord/8) :
- subByte = weightBin[lenExtMemWord-(b+1)*8:lenExtMemWord-b*8]
- outFile.write(struct.pack("B",int(subByte,2)))
- outFile.close()
-
- def __tmem2bin(self, mem, fileName, isBinaryString):
- import struct
- import bitstring
- lenExtMemWord=64
- outFile = open(fileName, "wb")
- for memInd in range(len(mem)):
- if isBinaryString:
- if len(mem[memInd]) > lenExtMemWord:
- raise "Threshold needs to fit into 64 bit ExtMemWord for now"
- if len(mem[memInd]) < lenExtMemWord:
- mem[memInd] = ('0' * (lenExtMemWord-len(mem[memInd]))) + mem[memInd]
- for b in range(lenExtMemWord/8) :
- subByte = mem[memInd][lenExtMemWord-(b+1)*8:lenExtMemWord-b*8]
- outFile.write(struct.pack("B",int(subByte,2)))
- else:
- #create list if APrecision==1
- if self.APrecision == 1:
- mem[memInd]=list([mem[memInd]])
- for numThresh in range(self.APrecision):
- #check wheter there are integer thresholds or floats
- if self.numThresIntBits is None:
- #do saturation
- accuWidth = self.numThresBits
- if mem[memInd][numThresh] > (2**(accuWidth-1))-1:
- saturate = (2**(accuWidth-1))-1
- #print "WARNING: Threshold is out of datatype (int" + str(accuWidth) + "). Thresh is " + str(mem[memInd][numThresh]) + ", saturated to " + str(saturate)
- mem[memInd][numThresh] = saturate
- elif mem[memInd][numThresh] < -(2**(accuWidth-1)):
- saturate = -(2**(accuWidth-1))
- #print "WARNING: Threshold is out of datatype (int" + str(accuWidth) + "). Thresh is " + str(mem[memInd][numThresh]) + ", saturated to " + str(saturate)
- mem[memInd][numThresh] = saturate
- if int(mem[memInd][numThresh]) != mem[memInd][numThresh]:
- print("WARNING: Cannot pack non-int values into binary threshold file.")
- print("The thresholds might be processed with wrong datatype. Check BNNProcElemMem arguments numThresBits and numThresIntBits to ensure correct fractional shift.")
- print("Packed will be ",int(mem[memInd][numThresh])," instead of ",mem[memInd][numThresh])
- #read as int
- threshBin = bitstring.Bits(int=int(mem[memInd][numThresh]), length=lenExtMemWord).unpack("bin:"+str(lenExtMemWord))[0]
- else:
- #read as fixed point to match with ap_fixed
- sizeFract = self.numThresBits - self.numThresIntBits
- intThresh = int(mem[memInd][numThresh] * (2**sizeFract))
- threshBin = bitstring.Bits(int=intThresh, length=lenExtMemWord).unpack("bin:"+str(lenExtMemWord))[0]
- #write byte by byte
- for b in range(lenExtMemWord/8):
- subByte = threshBin[lenExtMemWord-(b+1)*8:lenExtMemWord-b*8]
- outFile.write(struct.pack("B",int(subByte,2)))
- outFile.close()
-
-
- def createBinFiles(self, targetDir, prefix="", useThresholds=True):
- for pe in range(self.numPE):
- self.__wmem2bin(self.weightMem[pe], targetDir+"/"+prefix+"-"+str(pe)+"-weights.bin", self.WPrecision==1)
- if useThresholds:
- self.__tmem2bin(self.thresMem[pe], targetDir+"/"+prefix+"-"+str(pe)+"-thres.bin", False)
-
-# Finnthesizer HLS init files generation. Use these outputed header files for including params during bitstream generation
- def createHLSInitFiles(self, targetFile, varSuffix=""):
- outFile = open(targetFile , "wt")
- if self.WPrecision==1:
- wMemType = "ap_uint<1>"
- elif self.WPrecisionFract==0:
- wMemType = "ap_int<"+str(self.WPrecisionInt)+">"
- else:
- wMemType = "ap_fixed<"+str(self.WPrecision)+", "+str(self.WPrecisionFract)+", AP_RND_ZERO, AP_WRAP>"
- if self.numThresIntBits is None:
- tMemType = "ap_int<"+str(self.numThresBits)+">"
- else:
- tMemType = "ap_fixed<"+str(self.numThresBits)+", "+str(self.numThresIntBits)+">"
- if self.APrecision==1:
- ActType = "ap_uint<1>"
- elif self.WPrecisionFract==0:
- ActType = "ap_int<"+str(self.APrecisionInt)+">"
- else:
- ActType = "ap_fixed<"+str(self.APrecision)+", "+str(self.APrecisionFract)+", AP_RND_ZERO, AP_WRAP>"
- MinActVal = -1 # Minimum value of the output activations -> -1 if hardtanh, 0 with ReLu
- outFile.write("/*\nWeight and threshold memory initialization for Vivado HLS\n")
- outFile.write("PEs = %d, SIMD width = %d, threshold bits = %d\n" % (self.numPE, self.numSIMD, self.numThresBits))
- outFile.write("weight mem depth = %d, thres mem depth = %d\n" % (self.weightMemDepth, self.thresMemDepth))
- outFile.write("layer sizes (neurons, synapses per neuron): \n")
- outFile.writelines(["%s " % str(x) for x in self.layerSizes])
- outFile.write("\npadded neurons for each layer: \n")
- outFile.writelines(["%d " % x for x in self.neuronPad])
- outFile.write("\npadded synapses for each layer: \n")
- outFile.writelines(["%d " % x for x in self.synapsePad])
- outFile.write("\n*/\n\n")
- outFile.write("const unsigned int matrixH"+varSuffix+"[] = {%s};\n" % ", ".join(map(lambda x: str(x[0]), self.layerSizes)))
- outFile.write("const unsigned int matrixW"+varSuffix+"[] = {%s};\n" % ", ".join(map(lambda x: str(x[1]), self.layerSizes)))
- outFile.write("const unsigned int layerStartW"+varSuffix+"[] = {%s};\n" % ", ".join(map(str, self.layerHeadsW)))
- outFile.write("const unsigned int layerStartT"+varSuffix+"[] = {%s};\n\n" % ", ".join(map(str, self.layerHeadsT)))
-
- # write the weight memory init data
- # np.save("weightMem"+str(varSuffix)+".npy",self.weightMem)
- if self.WPrecision==1:
- outFile.write("BinaryWeights<%d,%d,%d> weights%s= {\n{\n" % (self.numSIMD, self.numPE, self.weightMemDepth, varSuffix))
- else:
- outFile.write("FixedPointWeights<%d,%s,%d,%d> weights%s= {\n{\n" % (self.numSIMD, wMemType, self.numPE, self.weightMemDepth, varSuffix))
- outFile.write(",".join(map(lambda pe:"{\n"+(",\n".join(map(self.__makeHLSInit, pe)))+"\n}", self.weightMem)))
- outFile.write("\n}\n};\n")
- # write the threshold memory init data
- # np.save("tresh"+str(varSuffix)+".npy",self.thresMem)
- if (self.numThresholds==1):
- outFile.write("ThresholdsActivation<%d,%d,%d,%s,%s> threshs%s = {\n{\n" % (self.thresMemDepth, self.numPE, self.numThresholds, tMemType, ActType, varSuffix))
- outFile.write(",".join(map(lambda pe:"{\n"+(",\n".join(map(str, pe) ))+"\n}", self.thresMem)))
- else:
- outFile.write("ThresholdsActivation<%d,%d,%d,%s,%s,%d> threshs%s = {\n{\n" % (self.thresMemDepth, self.numPE, self.numThresholds, tMemType, ActType, MinActVal, varSuffix))
- outFile.write(",".join(map(lambda pe:"{\n"+(",\n".join(map(lambda nthresh:"{\n"+",\n".join(map(str,nthresh))+"\n}", pe) ))+"\n}", self.thresMem)))
- #outFile.write(",".join(map(lambda pe: map(lambda nthresh:"{\n"+(",\n".join(map(str, nthresh) ))+"\n}", pe), self.thresMem)))
- outFile.write("\n}\n};\n")
- outFile.close()
-
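+            # thresholds are ap_fixed here: shift left by the number of fractional
+            # bits so the binary file holds the raw fixed-point words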
+ mem = mem * (1 << (self.numThresBits - self.numThresIntBits))
+ mem.astype(np.int64).tofile(fileName)
+
+ def createBinFiles(self, targetDir, prefix="", useThresholds=True):
+ for pe in range(self.numPE):
+ self.__wmem2bin(self.weightMem[pe], targetDir+"/"+prefix+"-"+str(pe)+"-weights.bin")
+ if useThresholds:
+ self.__tmem2bin(self.thresMem[pe], targetDir+"/"+prefix+"-"+str(pe)+"-thres.bin")
+
+    # Finnthesizer HLS init file generation. Use the generated header files to include the params during bitstream generation
+ def createHLSInitFiles(self, targetFile, varSuffix="", useThresholds=True):
+ outFile = open(targetFile , "wt")
+ if self.WPrecision==1:
+ wMemType = "ap_uint<1>"
+ elif self.WPrecisionFract==0:
+ wMemType = "ap_int<"+str(self.WPrecisionInt)+">"
+ else:
+ wMemType = "ap_fixed<"+str(self.WPrecision)+", "+str(self.WPrecisionFract)+", AP_RND_ZERO, AP_WRAP>"
+ if self.numThresIntBits is None:
+ tMemType = "ap_int<"+str(self.numThresBits)+">"
+ else:
+ tMemType = "ap_fixed<"+str(self.numThresBits)+", "+str(self.numThresIntBits)+">"
+ if self.APrecision==1:
+ ActType = "ap_uint<1>"
+        elif self.APrecisionFract==0:
+ ActType = "ap_int<"+str(self.APrecisionInt)+">"
+ else:
+ ActType = "ap_fixed<"+str(self.APrecision)+", "+str(self.APrecisionFract)+", AP_RND_ZERO, AP_WRAP>"
+        MinActVal = -1 # minimum value of the output activations: -1 with hardtanh, 0 with ReLU
+ outFile.write("/*\nWeight and threshold memory initialization for Vivado HLS\n")
+ outFile.write("PEs = %d, SIMD width = %d, threshold bits = %d\n" % (self.numPE, self.numSIMD, self.numThresBits))
+ outFile.write("weight mem depth = %d, thres mem depth = %d\n" % (self.weightMemDepth, self.thresMemDepth))
+ outFile.write("layer sizes (neurons, synapses per neuron): \n")
+ outFile.writelines(["%s " % str(x) for x in self.layerSizes])
+ outFile.write("\npadded neurons for each layer: \n")
+ outFile.writelines(["%d " % x for x in self.neuronPad])
+ outFile.write("\npadded synapses for each layer: \n")
+ outFile.writelines(["%d " % x for x in self.synapsePad])
+ outFile.write("\n*/\n\n")
+
+ # write the weight memory init data
+ # np.save("weightMem"+str(varSuffix)+".npy",self.weightMem)
+ if self.WPrecision==1:
+ outFile.write("static BinaryWeights<%d,%d,%d> weights%s= {\n{\n" % (self.numSIMD, self.numPE, self.weightMemDepth, varSuffix))
+ else:
+ outFile.write("static FixedPointWeights<%d,%s,%d,%d> weights%s= {\n{\n" % (self.numSIMD, wMemType, self.numPE, self.weightMemDepth, varSuffix))
+ outFile.write(",".join(["{\n"+(",\n".join(map(self.__makeHLSInit, pe)))+"\n}" for pe in self.weightMem]))
+ outFile.write("\n}\n};\n")
+
+ # write the threshold memory init data
+ if useThresholds:
+ # np.save("threshMem"+str(varSuffix)+".npy",self.thresMem)
+ if (self.numThresholds==1):
+ outFile.write("static ThresholdsActivation<%d,%d,%d,%s,%s> threshs%s = {\n{\n" % (self.thresMemDepth, self.numPE, self.numThresholds, tMemType, ActType, varSuffix))
+ outFile.write(",".join(["{\n"+(",\n".join(map(str, pe) ))+"\n}" for pe in self.thresMem]))
+ else:
+ outFile.write("static ThresholdsActivation<%d,%d,%d,%s,%s,%d> threshs%s = {\n{\n" % (self.thresMemDepth, self.numPE, self.numThresholds, tMemType, ActType, MinActVal, varSuffix))
+ outFile.write(",".join(["{\n"+(",\n".join(["{\n"+",\n".join(map(str,nthresh))+"\n}" for nthresh in pe] ))+"\n}" for pe in self.thresMem]))
+ outFile.write("\n}\n};\n")
+ outFile.close()
diff --git a/bnn/src/training/gtsrb-gen-binary-weights.py b/bnn/src/training/gtsrb-gen-binary-weights.py
index da6f5224..fa0abd7d 100644
--- a/bnn/src/training/gtsrb-gen-binary-weights.py
+++ b/bnn/src/training/gtsrb-gen-binary-weights.py
@@ -1,43 +1,42 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2018, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
from finnthesizer import *
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/gtsrb-1w-1a.npz"
- targetDirBin = bnnRoot + "/binparam-cnvW1A1-pynq-gtrsb"
- targetDirHLS = bnnRoot + "/binparam-cnvW1A1-pynq-gtrsb/hw"
+ targetDirBin = bnnRoot + "/cnvW1A1-gtrsb"
+ targetDirHLS = bnnRoot + "/cnvW1A1-gtrsb/hw"
#topology of convolutional layers (only for config.h defines)
ifm = [32, 30, 14, 12, 5, 3]
ofm = [30, 28, 12, 10, 3, 1]
@@ -97,15 +96,15 @@
'End of no-overtaking zone for vehicles with a permitted gross weight over 3.5t including their trailers, and for tractors except passenger cars and buses',
'Not a roadsign'
]
-
+
#configuration of PE and SIMD counts
- peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
- simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
+ peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
+ simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
if not os.path.exists(targetDirBin):
- os.mkdir(targetDirBin)
+ os.mkdir(targetDirBin)
if not os.path.exists(targetDirHLS):
- os.mkdir(targetDirHLS)
+ os.mkdir(targetDirHLS)
#read weights
rHW = BNNWeightReader(npzFile, True)
@@ -115,112 +114,95 @@
config+= " *\n **/\n\n"
config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
+ # process convolutional layers
for convl in range(0, 6):
- peCount = peCounts[convl]
- simdCount = simdCounts[convl]
- WPrecision_fractional = WeightsPrecisions_fractional[convl]
- APrecision_fractional = ActivationPrecisions_fractional[convl]
- IPrecision_fractional = InputPrecisions_fractional[convl]
- WPrecision_integer = WeightsPrecisions_integer[convl]
- APrecision_integer = ActivationPrecisions_integer[convl]
- IPrecision_integer = InputPrecisions_integer[convl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl)
- if convl == 0:
+ peCount = peCounts[convl]
+ simdCount = simdCounts[convl]
+ WPrecision_fractional = WeightsPrecisions_fractional[convl]
+ APrecision_fractional = ActivationPrecisions_fractional[convl]
+ IPrecision_fractional = InputPrecisions_fractional[convl]
+ WPrecision_integer = WeightsPrecisions_integer[convl]
+ APrecision_integer = ActivationPrecisions_integer[convl]
+ IPrecision_integer = InputPrecisions_integer[convl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl))
# use fixed point weights for the first layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer, usePopCount=False, numThresBits=24, numThresIntBits=16)
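+        # the first layer works on fixed-point values, so popcount is disabled and it
+        # gets 24-bit fixed-point thresholds (16 integer bits); all later layers use
+        # popcount with plain 16-bit integer thresholds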
+ (usePopCount, numThresBits, numThresIntBits) = (False, 24, 16) if convl==0 else (True, 16, None)
+ (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
# compute the padded width and height
paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (convl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
+ WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
m.addMatrix(w,t,paddedW,paddedH)
-
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, \
+ peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
m.createBinFiles(targetDirBin, str(convl))
- else:
- # regular binarized layer
- (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
+ # process fully-connected layers
+ for fcl in range(6,9):
+ peCount = peCounts[fcl]
+ simdCount = simdCounts[fcl]
+ WPrecision_fractional = WeightsPrecisions_fractional[fcl]
+ APrecision_fractional = ActivationPrecisions_fractional[fcl]
+ IPrecision_fractional = InputPrecisions_fractional[fcl]
+ WPrecision_integer = WeightsPrecisions_integer[fcl]
+ APrecision_integer = ActivationPrecisions_integer[fcl]
+ IPrecision_integer = InputPrecisions_integer[fcl]
+ print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl))
+ if fcl == 8:
+ (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], 64)
+ useThresholds = False
+ else:
+ (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
+ WPrecision_integer, APrecision_integer, IPrecision_integer)
+ paddedH = padTo(w.shape[0], peCount)
+ useThresholds = True
# compute the padded width and height
- paddedH = padTo(w.shape[0], peCount)
paddedW = padTo(w.shape[1], simdCount)
# compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (convl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+ neededWMem = (paddedW * paddedH) // (simdCount * peCount)
+ neededTMem = paddedH // peCount
+ print("Layer %d: %d x %d" % (fcl, paddedH, paddedW))
+ print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
+ print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,\
+ WPrecision_fractional, APrecision_integer, APrecision_fractional))
+ m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
+ WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
+
m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
-
+ config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
+ WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
+
+ #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
+ # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl), useThresholds)
+
#generate binary weight and threshold files to initialize memory during runtime
#because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(convl))
-
- # process fully-connected layers
- for fcl in range(6,9):
- peCount = peCounts[fcl]
- simdCount = simdCounts[fcl]
- WPrecision_fractional = WeightsPrecisions_fractional[fcl]
- APrecision_fractional = ActivationPrecisions_fractional[fcl]
- IPrecision_fractional = InputPrecisions_fractional[fcl]
- WPrecision_integer = WeightsPrecisions_integer[fcl]
- APrecision_integer = ActivationPrecisions_integer[fcl]
- IPrecision_integer = InputPrecisions_integer[fcl]
- print "Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl)
-
- if fcl == 8:
- (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], 64)
- useThresholds = False
- else:
- (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, WPrecision_integer, APrecision_integer, IPrecision_integer)
- paddedH = padTo(w.shape[0], peCount)
- useThresholds = True
-
- paddedW = padTo(w.shape[1], simdCount)
- # compute memory needed for weights and thresholds
- neededWMem = (paddedW * paddedH) / (simdCount * peCount)
- neededTMem = paddedH / peCount
- print "Layer %d: %d x %d" % (fcl, paddedH, paddedW)
- print "WMem = %d TMem = %d" % (neededWMem, neededTMem)
- print "IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional)
-
- m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
- m.addMatrix(w,t,paddedW,paddedH)
-
- config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
-
- #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
- #m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl))
-
- #generate binary weight and threshold files to initialize memory during runtime
- #because HLS might not work for very large header files
- m.createBinFiles(targetDirBin, str(fcl), useThresholds)
+ m.createBinFiles(targetDirBin, str(fcl), useThresholds)
config+="#endif //__LAYER_CONFIG_H_\n"
-
configFile = open(targetDirHLS+"/config.h", "w")
configFile.write(config)
configFile.close()
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))
+
+
\ No newline at end of file
diff --git a/bnn/src/training/mnist-gen-weights-W1A1.py b/bnn/src/training/mnist-gen-weights-W1A1.py
index e0d6bd91..0ffdbc62 100644
--- a/bnn/src/training/mnist-gen-weights-W1A1.py
+++ b/bnn/src/training/mnist-gen-weights-W1A1.py
@@ -1,57 +1,60 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
import finnthesizer as fth
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/mnist-1w-1a.npz"
- targetDirBin = bnnRoot + "/binparam-lfcW1A1-pynq"
- targetDirHLS = bnnRoot + "/binparam-lfcW1A1-pynq/hw"
+ targetDirBin = bnnRoot + "/lfcW1A1"
+ targetDirHLS = bnnRoot + "/lfcW1A1/hw"
simdCounts = [64, 32, 64, 8]
peCounts = [32, 64, 32, 16]
+ WeightsPrecisions_integer = [1, 1, 1, 1]
WeightsPrecisions_fractional = [0, 0, 0, 0]
- ActivationPrecisions_fractional = [0, 0, 0, 0]
+
+ InputPrecisions_integer = [1, 1, 1, 1]
InputPrecisions_fractional = [0, 0, 0, 0]
- WeightsPrecisions_integer = [1, 1, 1, 1]
+
ActivationPrecisions_integer = [1, 1, 1, 1]
- InputPrecisions_integer = [1, 1, 1, 1]
+ ActivationPrecisions_fractional = [0, 0, 0, 0]
- classes = map(lambda x: str(x), range(10))
+ classes = [str(x) for x in range(10)]
- fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
+ fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, \
+ WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, \
+ WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))
diff --git a/bnn/src/training/mnist-gen-weights-W1A2.py b/bnn/src/training/mnist-gen-weights-W1A2.py
index a59d2a87..e712dab6 100644
--- a/bnn/src/training/mnist-gen-weights-W1A2.py
+++ b/bnn/src/training/mnist-gen-weights-W1A2.py
@@ -1,57 +1,60 @@
-#BSD 3-Clause License
-#=======
-#
-#Copyright (c) 2017, Xilinx
-#All rights reserved.
-#
-#Redistribution and use in source and binary forms, with or without
-#modification, are permitted provided that the following conditions are met:
-#
-#* Redistributions of source code must retain the above copyright notice, this
+# BSD 3-Clause License
+# =======
+
+# Copyright (c) 2020, Xilinx
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
-#* Redistributions in binary form must reproduce the above copyright notice,
+
+# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
-#* Neither the name of the copyright holder nor the names of its
+
+# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
-#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import sys
import finnthesizer as fth
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/mnist-1w-2a.npz"
- targetDirBin = bnnRoot + "/binparam-lfcW1A2-pynq"
- targetDirHLS = bnnRoot + "/binparam-lfcW1A2-pynq/hw"
+ targetDirBin = bnnRoot + "/lfcW1A2"
+ targetDirHLS = bnnRoot + "/lfcW1A2/hw"
simdCounts = [64, 32, 64, 8]
peCounts = [32, 64, 32, 16]
+ WeightsPrecisions_integer = [1, 1, 1, 1]
WeightsPrecisions_fractional = [0, 0, 0, 0]
- ActivationPrecisions_fractional = [0, 0, 0, 0]
+
+ InputPrecisions_integer = [1, 2, 2, 2]
InputPrecisions_fractional = [0, 0, 0, 0]
- WeightsPrecisions_integer = [1, 1, 1, 1]
+
ActivationPrecisions_integer = [2, 2, 2, 1]
- InputPrecisions_integer = [1, 2, 2, 2]
+ ActivationPrecisions_fractional = [0, 0, 0, 0]
- classes = map(lambda x: str(x), range(10))
+ classes = [str(x) for x in range(10)]
- fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
+ fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, \
+ WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional,\
+ WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))