-
Notifications
You must be signed in to change notification settings - Fork 59
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #52 from Xilinx/dev
Release v0.0.6
- Loading branch information
Showing
30 changed files
with
1,848 additions
and
931 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,22 @@ | ||
# The multilayer perceptron for cybersecurity use-cases | ||
The multilayer perceptron (MLP) for the cybersecurity use-case is based on the three-part tutorial for training a quantized MLP and deploying it with FINN, which is provided in the FINN [end-to-end example repository](https://github.com/Xilinx/finn/tree/main/notebooks/end2end_example). The MLP consists of four fully-connected layers in total: three hidden layers with 64 neurons, and a final output layer with a single output, all using 2-bit weights. For more information on training the network, or more details behind what's happening under the hood, the notebooks provided in the FINN end-to-end example repository serve as an excellent starting point.
|
||
# Build bitfiles for MLP example | ||
0. Ensure you have performed the *Setup* steps in the top-level README for setting up the FINN requirements and environment variables. | ||
|
||
1. Edit the `mlp-cybersecurity/build.py` to restrict the platform variables to the ones that you are interested in, e.g. `platforms_to_build = ["Pynq-Z1"]`. You can also change the other build configuration options, see the [FINN docs](https://finn-dev.readthedocs.io/en/latest/source_code/finn.util.html#finn.util.build_dataflow.DataflowBuildConfig) for a full explanation. | ||
|
||
2. Launch the build as follows: | ||
```shell | ||
# update this according to where you cloned this repo: | ||
FINN_EXAMPLES=/path/to/finn-examples | ||
# cd into finn submodule | ||
cd $FINN_EXAMPLES/build/finn | ||
# launch the build on the cybersecurity-mlp folder | ||
./run-docker.sh build_custom $FINN_EXAMPLES/build/cybersecurity-mlp | ||
``` | ||
|
||
3. The generated outputs will be under `cybersecurity-mlp/output_<topology>_<platform>`. You can find a description of the generated files [here](https://finn-dev.readthedocs.io/en/latest/command_line.html#simple-dataflow-build-mode). | ||
|
||
# Where did the ONNX model file come from? | ||
The ONNX model is created and exported before the build flow is launched. You can find the details of this process in the `cybersecurity-mlp/custom_steps.py` file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
import finn.builder.build_dataflow as build | ||
import finn.builder.build_dataflow_config as build_cfg | ||
from finn.util.basic import alveo_default_platform | ||
import os | ||
import shutil | ||
from custom_steps import custom_step_mlp_export | ||
|
||
# Boards to generate bitfiles for, grouped by shell flow.
zynq_platforms = ["Pynq-Z1", "Ultra96", "ZCU104"]
# No Alveo cards enabled by default; add Vitis-supported card names here.
alveo_platforms = []

# Note: only zynq platforms currently tested
platforms_to_build = zynq_platforms + alveo_platforms
|
||
# determine which shell flow to use for a given platform | ||
def platform_to_shell(platform): | ||
if platform in zynq_platforms: | ||
return build_cfg.ShellFlowType.VIVADO_ZYNQ | ||
elif platform in alveo_platforms: | ||
return build_cfg.ShellFlowType.VITIS_ALVEO | ||
else: | ||
raise Exception("Unknown platform, can't determine ShellFlowType") | ||
|
||
# Define model name: quantized MLP trained on UNSW-NB15, 2-bit weights/activations
model_name = "unsw_nb15-mlp-w2a2"

# Create a release dir, used for finn-examples release packaging
os.makedirs("release", exist_ok=True)

# Build one bitfile (plus driver/reports/deployment package) per platform.
for platform_name in platforms_to_build:
    shell_flow_type = platform_to_shell(platform_name)
    if shell_flow_type == build_cfg.ShellFlowType.VITIS_ALVEO:
        vitis_platform = alveo_default_platform[platform_name]
        # for Alveo, use the Vitis platform name as the release name
        # e.g. xilinx_u250_xdma_201830_2
        release_platform_name = vitis_platform
    else:
        vitis_platform = None
        # for Zynq, use the board name as the release name
        # e.g. ZCU104
        release_platform_name = platform_name
    platform_dir = os.path.join("release", release_platform_name)
    os.makedirs(platform_dir, exist_ok=True)

    # Set up the build configuration for this model; see DataflowBuildConfig
    # in the FINN docs for the full set of options.
    cfg = build_cfg.DataflowBuildConfig(
        output_dir="output_%s_%s" % (model_name, release_platform_name),
        mvau_wwidth_max=80,
        target_fps=1000000,
        synth_clk_period_ns=10.0,
        board=platform_name,
        shell_flow_type=shell_flow_type,
        vitis_platform=vitis_platform,
        # vitis_opt_strategy only takes effect for the Alveo (Vitis) flow
        vitis_opt_strategy=build_cfg.VitisOptStrategyCfg.PERFORMANCE_BEST,
        generate_outputs=[
            build_cfg.DataflowOutputType.PYNQ_DRIVER,
            build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
            build_cfg.DataflowOutputType.BITFILE,
            build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
        ],
        save_intermediate_models=True,
    )

    # Export MLP model to FINN-ONNX
    model = custom_step_mlp_export(model_name)
    # Launch FINN compiler to generate bitfile
    build.build_dataflow_cfg(model, cfg)

    # Copy bitfiles into release dir if found. Which artifacts exist depends
    # on the shell flow (.bit/.hwh for Zynq, .xclbin for Alveo), so each is
    # checked individually. (Variable name fixed: was "filtes_...".)
    bitfile_gen_dir = os.path.join(cfg.output_dir, "bitfile")
    files_to_check_and_copy = [
        "finn-accel.bit",
        "finn-accel.hwh",
        "finn-accel.xclbin",
    ]
    for f in files_to_check_and_copy:
        src_file = os.path.join(bitfile_gen_dir, f)
        dst_file = os.path.join(platform_dir, f.replace("finn-accel", model_name))
        if os.path.isfile(src_file):
            shutil.copy(src_file, dst_file)
Oops, something went wrong.