Merge pull request #3 from aloeliger/namespaceTypesAndWeights
Add namespaces to all CICADA types and weights
aloeliger authored Mar 21, 2024
2 parents 2baca92 + 8aac56a commit 8b05ed3
Showing 93 changed files with 614 additions and 437 deletions.
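Why the namespaces matter: the hls4ml-generated headers for the different CICADA versions declare identically named types (input_t, result_t, the layer config structs) and weight arrays (b2, w2, ...), so linking more than one model version into the same emulator process risks clashes between those symbols. Wrapping each version in its own namespace (CICADA_v1, CICADA_v1p1, ...) keeps them distinct. A minimal standalone sketch of the idea, with hypothetical stand-in typedefs rather than the real generated headers (ap_fixed.h is the Xilinx/AMD HLS arbitrary-precision header already used by this code):

#include <cstdio>
#include "ap_fixed.h"

// Hypothetical stand-ins for two versions' generated defines.h.
namespace CICADA_v1   { typedef ap_ufixed<10,10> input_t; typedef ap_fixed<11,5> result_t; }
namespace CICADA_v1p1 { typedef ap_ufixed<10,10> input_t; typedef ap_fixed<11,5> result_t; }

int main() {
  // Both versions' types now coexist in one translation unit without clashing.
  CICADA_v1::result_t   r_v1   = 3.25;
  CICADA_v1p1::result_t r_v1p1 = 3.25;
  std::printf("%f %f\n", r_v1.to_double(), r_v1p1.to_double());
  return 0;
}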
4 changes: 3 additions & 1 deletion CICADA_v1/caloADModel_v1.cpp
@@ -3,6 +3,8 @@
#include <any>
#include "ap_fixed.h"

+using namespace CICADA_v1;
+
class caloADModel_v1 : public hls4mlEmulator::Model{
private:
input_t _input[N_INPUT_1_1];
@@ -37,4 +39,4 @@ extern "C" hls4mlEmulator::Model* create_model()
extern "C" void destroy_model(hls4mlEmulator::Model* m)
{
delete m;
-}
+}
36 changes: 19 additions & 17 deletions CICADA_v1/defines.h
@@ -16,22 +16,24 @@
#define N_CH_OUT 2

//hls-fpga-machine-learning insert layer-precision
-typedef ap_ufixed<10,10> inputs_accum_t;
-typedef ap_ufixed<10,10> input_t;
-typedef ap_fixed<20,18> dense1_accum_t;
-typedef ap_fixed<20,18> layer2_t;
-typedef ap_fixed<2,1> weight2_t;
-typedef ap_uint<1> bias2_t;
-typedef ap_uint<1> layer2_index;
-typedef ap_fixed<15,12,AP_RND,AP_SAT,AP_SAT> layer4_t;
-typedef ap_fixed<18,3> qbn1_scale_t;
-typedef ap_fixed<18,3> qbn1_bias_t;
-typedef ap_ufixed<5,2,AP_RND,AP_SAT,AP_SAT> layer5_t;
-typedef ap_fixed<18,8> relu1_table_t;
-typedef ap_fixed<11,5> output_accum_t;
-typedef ap_fixed<11,5> result_t;
-typedef ap_fixed<4,1> weight6_t;
-typedef ap_uint<1> bias6_t;
-typedef ap_uint<1> layer6_index;
+namespace CICADA_v1{
+typedef ap_ufixed<10,10> inputs_accum_t;
+typedef ap_ufixed<10,10> input_t;
+typedef ap_fixed<20,18> dense1_accum_t;
+typedef ap_fixed<20,18> layer2_t;
+typedef ap_fixed<2,1> weight2_t;
+typedef ap_uint<1> bias2_t;
+typedef ap_uint<1> layer2_index;
+typedef ap_fixed<15,12,AP_RND,AP_SAT,AP_SAT> layer4_t;
+typedef ap_fixed<18,3> qbn1_scale_t;
+typedef ap_fixed<18,3> qbn1_bias_t;
+typedef ap_ufixed<5,2,AP_RND,AP_SAT,AP_SAT> layer5_t;
+typedef ap_fixed<18,8> relu1_table_t;
+typedef ap_fixed<11,5> output_accum_t;
+typedef ap_fixed<11,5> result_t;
+typedef ap_fixed<4,1> weight6_t;
+typedef ap_uint<1> bias6_t;
+typedef ap_uint<1> layer6_index;
+}

#endif
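For reference, ap_fixed<W,I> from the HLS arbitrary-precision headers is a signed fixed-point type with W total bits and I integer bits (sign included), leaving W-I fractional bits; ap_ufixed is the unsigned counterpart, and the optional AP_RND/AP_SAT arguments select rounding and saturation instead of the default truncate-and-wrap behaviour. So result_t above covers roughly [-16, 16) in steps of 1/64, and layer5_t covers [0, 3.875] in steps of 1/8. A small standalone illustration, not part of the repository:

#include <cstdio>
#include "ap_fixed.h"

int main() {
  ap_fixed<11,5> r = 3.14159;             // result_t-like: truncated to a multiple of 2^-6 -> 3.140625
  ap_ufixed<5,2,AP_RND,AP_SAT> l5 = 7.9;  // layer5_t-like: rounds and saturates at its maximum -> 3.875
  std::printf("r = %f, l5 = %f\n", r.to_double(), l5.to_double());
  return 0;
}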
2 changes: 2 additions & 0 deletions CICADA_v1/myproject.cpp
@@ -21,6 +21,8 @@
#include "myproject.h"
#include "parameters.h"

+using namespace CICADA_v1;
+
void myproject(
input_t Inputs[N_INPUT_1_1],
result_t layer6_out[N_LAYER_6]
1 change: 1 addition & 0 deletions CICADA_v1/myproject.h
@@ -26,6 +26,7 @@

#include "defines.h"

+using namespace CICADA_v1;
// Prototype of top level function for C-synthesis
void myproject(
input_t Inputs[N_INPUT_1_1],
31 changes: 16 additions & 15 deletions CICADA_v1/parameters.h
@@ -24,7 +24,8 @@

//hls-fpga-machine-learning insert layer-config
// dense1
-struct config2 : nnet::dense_config {
+namespace CICADA_v1{
+struct config2 : nnet::dense_config {
static const unsigned n_in = 252;
static const unsigned n_out = 15;
static const unsigned io_type = nnet::io_parallel;
@@ -38,11 +39,11 @@ struct config2 : nnet::dense_config {
typedef weight2_t weight_t;
typedef layer2_index index_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
+using product = nnet::product::mult<x_T, y_T>;
+};

-// QBN1
-struct config4 : nnet::batchnorm_config {
+// QBN1
+struct config4 : nnet::batchnorm_config {
static const unsigned n_in = N_LAYER_2;
static const unsigned n_filt = -1;
static const unsigned n_scale_bias = (n_filt == -1) ? n_in : n_filt;
@@ -52,20 +53,20 @@ struct config4 : nnet::batchnorm_config {
typedef qbn1_bias_t bias_t;
typedef qbn1_scale_t scale_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
+using product = nnet::product::mult<x_T, y_T>;
+};

-// relu1
-struct relu_config5 : nnet::activ_config {
+// relu1
+struct relu_config5 : nnet::activ_config {
static const unsigned n_in = 15;
static const unsigned table_size = 1024;
static const unsigned io_type = nnet::io_parallel;
static const unsigned reuse_factor = 3;
typedef relu1_table_t table_t;
-};
+};

-// output
-struct config6 : nnet::dense_config {
+// output
+struct config6 : nnet::dense_config {
static const unsigned n_in = 15;
static const unsigned n_out = 1;
static const unsigned io_type = nnet::io_parallel;
@@ -79,8 +80,8 @@ struct config6 : nnet::dense_config {
typedef weight6_t weight_t;
typedef layer6_index index_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
-
+using product = nnet::product::mult<x_T, y_T>;
+};
+}

#endif
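The config structs above are consumed as template parameters by the nnet:: layer functions inside myproject.cpp, which is why they had to move into the namespace together with the types and weights they reference. The generated body of myproject.cpp is not part of this diff, so the following is only a schematic of the usual hls4ml calling pattern for a dense -> batchnorm -> ReLU -> dense model, under the assumption that the standard nnet::dense, nnet::normalize and nnet::relu templates are used:

// Schematic only; the real generated code also carries HLS pragmas and
// weight-loading logic that are not shown here.
#include "myproject.h"
#include "parameters.h"

using namespace CICADA_v1;

void myproject(input_t Inputs[N_INPUT_1_1], result_t layer6_out[N_LAYER_6]) {
    layer2_t layer2_out[N_LAYER_2];
    nnet::dense<input_t, layer2_t, config2>(Inputs, layer2_out, w2, b2);          // dense1

    layer4_t layer4_out[N_LAYER_2];
    nnet::normalize<layer2_t, layer4_t, config4>(layer2_out, layer4_out, s4, b4); // QBN1

    layer5_t layer5_out[N_LAYER_2];
    nnet::relu<layer4_t, layer5_t, relu_config5>(layer4_out, layer5_out);         // relu1

    nnet::dense<layer5_t, result_t, config6>(layer5_out, layer6_out, w6, b6);     // output
}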
7 changes: 4 additions & 3 deletions CICADA_v1/weights/b2.h
@@ -6,10 +6,11 @@
#ifndef B2_H_
#define B2_H_

+namespace CICADA_v1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-bias2_t b2[15];
+bias2_t b2[15];
#else
-bias2_t b2[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+bias2_t b2[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
-
+}
#endif
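The weight headers all follow hls4ml's two-mode layout: when LOAD_WEIGHTS_FROM_TXT is defined the arrays are declared empty and are expected to be filled at run time (hls4ml ships an nnet::load_weights_from_txt helper for that), otherwise the values are compiled in directly; the new namespace wraps both branches. A tiny standalone illustration of the compile-time switch, using int in place of the ap_uint<1> bias type:

#include <cstdio>

#ifdef LOAD_WEIGHTS_FROM_TXT
int b2[15];                                    // left empty, filled by a runtime loader
#else
int b2[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif

int main() {
  std::printf("b2[0] = %d\n", b2[0]);          // build with -DLOAD_WEIGHTS_FROM_TXT to take the first branch
  return 0;
}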
6 changes: 4 additions & 2 deletions CICADA_v1/weights/b4.h
@@ -6,10 +6,12 @@
#ifndef B4_H_
#define B4_H_

+namespace CICADA_v1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-qbn1_bias_t b4[15];
+qbn1_bias_t b4[15];
#else
-qbn1_bias_t b4[15] = {0.46103125810623, 0.51285690069199, -0.50042384862900, 0.28249555826187, 0.71972000598907, 1.12275481224060, -0.40971821546555, 0.34330978989601, -0.55154109001160, -0.27038201689720, 0.33504489064217, 1.41388034820557, -0.78045839071274, 0.02692255377769, -1.01562500000000};
+qbn1_bias_t b4[15] = {0.46103125810623, 0.51285690069199, -0.50042384862900, 0.28249555826187, 0.71972000598907, 1.12275481224060, -0.40971821546555, 0.34330978989601, -0.55154109001160, -0.27038201689720, 0.33504489064217, 1.41388034820557, -0.78045839071274, 0.02692255377769, -1.01562500000000};
#endif
+}

#endif
6 changes: 4 additions & 2 deletions CICADA_v1/weights/b6.h
@@ -6,10 +6,12 @@
#ifndef B6_H_
#define B6_H_

+namespace CICADA_v1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-bias6_t b6[1];
+bias6_t b6[1];
#else
-bias6_t b6[1] = {0};
+bias6_t b6[1] = {0};
#endif
+}

#endif
6 changes: 4 additions & 2 deletions CICADA_v1/weights/s4.h
@@ -6,10 +6,12 @@
#ifndef S4_H_
#define S4_H_

+namespace CICADA_v1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-qbn1_scale_t s4[15];
+qbn1_scale_t s4[15];
#else
-qbn1_scale_t s4[15] = {0.01189631503075, 0.01800997555256, 0.01802297122777, 0.02153923362494, 0.01781351678073, 0.01568748056889, 0.03175498172641, 0.01092354580760, 0.03501369431615, 0.01039832551032, 0.01362946536392, 0.01694594882429, 0.01294392812997, 0.01520584244281, 0.02299462258816};
+qbn1_scale_t s4[15] = {0.01189631503075, 0.01800997555256, 0.01802297122777, 0.02153923362494, 0.01781351678073, 0.01568748056889, 0.03175498172641, 0.01092354580760, 0.03501369431615, 0.01039832551032, 0.01362946536392, 0.01694594882429, 0.01294392812997, 0.01520584244281, 0.02299462258816};
#endif
+}

#endif
6 changes: 4 additions & 2 deletions CICADA_v1/weights/w2.h

Large diffs are not rendered by default.

6 changes: 4 additions & 2 deletions CICADA_v1/weights/w6.h
@@ -6,10 +6,12 @@
#ifndef W6_H_
#define W6_H_

+namespace CICADA_v1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-weight6_t w6[15];
+weight6_t w6[15];
#else
-weight6_t w6[15] = {-0.125, 0.000, 0.250, 0.250, 0.250, -0.125, 0.250, 0.250, 0.125, 0.375, 0.000, -0.125, 0.375, 0.250, 0.125};
+weight6_t w6[15] = {-0.125, 0.000, 0.250, 0.250, 0.250, -0.125, 0.250, 0.250, 0.125, 0.375, 0.000, -0.125, 0.375, 0.250, 0.125};
#endif
+}

#endif
4 changes: 3 additions & 1 deletion CICADA_v1p1/caloADModel_v1p1.cpp
@@ -3,6 +3,8 @@
#include <any>
#include "ap_fixed.h"

+using namespace CICADA_v1p1;
+
class caloADModel_v1p1 : public hls4mlEmulator::Model{
private:
input_t _input[N_INPUT_1_1];
@@ -37,4 +39,4 @@ extern "C" hls4mlEmulator::Model* create_model()
extern "C" void destroy_model(hls4mlEmulator::Model* m)
{
delete m;
-}
+}
38 changes: 20 additions & 18 deletions CICADA_v1p1/defines.h
@@ -15,23 +15,25 @@
#define N_CH_IN 36
#define N_CH_OUT 2

-//hls-fpga-machine-learning insert layer-precision
-typedef ap_ufixed<10,10> inputs_accum_t;
-typedef ap_ufixed<10,10> input_t;
-typedef ap_fixed<20,18> dense1_accum_t;
-typedef ap_fixed<20,18> layer2_t;
-typedef ap_fixed<2,1> weight2_t;
-typedef ap_uint<1> bias2_t;
-typedef ap_uint<1> layer2_index;
-typedef ap_fixed<15,12,AP_RND,AP_SAT,AP_SAT> layer4_t;
-typedef ap_fixed<18,3> qbn1_scale_t;
-typedef ap_fixed<18,3> qbn1_bias_t;
-typedef ap_ufixed<5,2,AP_RND,AP_SAT,AP_SAT> layer5_t;
-typedef ap_fixed<18,8> relu1_table_t;
-typedef ap_fixed<11,5> output_accum_t;
-typedef ap_fixed<11,5> result_t;
-typedef ap_fixed<4,1> weight6_t;
-typedef ap_uint<1> bias6_t;
-typedef ap_uint<1> layer6_index;
+namespace CICADA_v1p1{
+//hls-fpga-machine-learning insert layer-precision
+typedef ap_ufixed<10,10> inputs_accum_t;
+typedef ap_ufixed<10,10> input_t;
+typedef ap_fixed<20,18> dense1_accum_t;
+typedef ap_fixed<20,18> layer2_t;
+typedef ap_fixed<2,1> weight2_t;
+typedef ap_uint<1> bias2_t;
+typedef ap_uint<1> layer2_index;
+typedef ap_fixed<15,12,AP_RND,AP_SAT,AP_SAT> layer4_t;
+typedef ap_fixed<18,3> qbn1_scale_t;
+typedef ap_fixed<18,3> qbn1_bias_t;
+typedef ap_ufixed<5,2,AP_RND,AP_SAT,AP_SAT> layer5_t;
+typedef ap_fixed<18,8> relu1_table_t;
+typedef ap_fixed<11,5> output_accum_t;
+typedef ap_fixed<11,5> result_t;
+typedef ap_fixed<4,1> weight6_t;
+typedef ap_uint<1> bias6_t;
+typedef ap_uint<1> layer6_index;
+}

#endif
2 changes: 2 additions & 0 deletions CICADA_v1p1/myproject.cpp
@@ -21,6 +21,8 @@
#include "myproject.h"
#include "parameters.h"

+using namespace CICADA_v1p1;
+
void myproject(
input_t Inputs[N_INPUT_1_1],
result_t layer6_out[N_LAYER_6]
2 changes: 2 additions & 0 deletions CICADA_v1p1/myproject.h
@@ -26,6 +26,8 @@

#include "defines.h"

+using namespace CICADA_v1p1;
+
// Prototype of top level function for C-synthesis
void myproject(
input_t Inputs[N_INPUT_1_1],
35 changes: 18 additions & 17 deletions CICADA_v1p1/parameters.h
@@ -22,9 +22,10 @@
#include "weights/w6.h"
#include "weights/b6.h"

-//hls-fpga-machine-learning insert layer-config
-// dense1
-struct config2 : nnet::dense_config {
+namespace CICADA_v1p1{
+//hls-fpga-machine-learning insert layer-config
+// dense1
+struct config2 : nnet::dense_config {
static const unsigned n_in = 252;
static const unsigned n_out = 15;
static const unsigned io_type = nnet::io_parallel;
@@ -38,11 +39,11 @@ struct config2 : nnet::dense_config {
typedef weight2_t weight_t;
typedef layer2_index index_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
+using product = nnet::product::mult<x_T, y_T>;
+};

-// QBN1
-struct config4 : nnet::batchnorm_config {
+// QBN1
+struct config4 : nnet::batchnorm_config {
static const unsigned n_in = N_LAYER_2;
static const unsigned n_filt = -1;
static const unsigned n_scale_bias = (n_filt == -1) ? n_in : n_filt;
@@ -52,20 +53,20 @@ struct config4 : nnet::batchnorm_config {
typedef qbn1_bias_t bias_t;
typedef qbn1_scale_t scale_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
+using product = nnet::product::mult<x_T, y_T>;
+};

-// relu1
-struct relu_config5 : nnet::activ_config {
+// relu1
+struct relu_config5 : nnet::activ_config {
static const unsigned n_in = 15;
static const unsigned table_size = 1024;
static const unsigned io_type = nnet::io_parallel;
static const unsigned reuse_factor = 3;
typedef relu1_table_t table_t;
-};
+};

-// output
-struct config6 : nnet::dense_config {
+// output
+struct config6 : nnet::dense_config {
static const unsigned n_in = 15;
static const unsigned n_out = 1;
static const unsigned io_type = nnet::io_parallel;
@@ -79,8 +80,8 @@ struct config6 : nnet::dense_config {
typedef weight6_t weight_t;
typedef layer6_index index_t;
template<class x_T, class y_T>
-using product = nnet::product::mult<x_T, y_T>;
-};
-
+using product = nnet::product::mult<x_T, y_T>;
+};
+}

#endif
6 changes: 4 additions & 2 deletions CICADA_v1p1/weights/b2.h
@@ -6,10 +6,12 @@
#ifndef B2_H_
#define B2_H_

+namespace CICADA_v1p1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-bias2_t b2[15];
+bias2_t b2[15];
#else
-bias2_t b2[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+bias2_t b2[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
+}

#endif
6 changes: 4 additions & 2 deletions CICADA_v1p1/weights/b4.h
@@ -6,10 +6,12 @@
#ifndef B4_H_
#define B4_H_

+namespace CICADA_v1p1{
#ifdef LOAD_WEIGHTS_FROM_TXT
-qbn1_bias_t b4[15];
+qbn1_bias_t b4[15];
#else
-qbn1_bias_t b4[15] = {-0.807135, -1.400979, -1.234387, -0.428788, -0.192108, -0.883086, -0.527288, 0.052511, -0.784181, -0.185904, -0.225453, -0.830244, -1.167856, -1.034810, -0.373645};
+qbn1_bias_t b4[15] = {-0.807135, -1.400979, -1.234387, -0.428788, -0.192108, -0.883086, -0.527288, 0.052511, -0.784181, -0.185904, -0.225453, -0.830244, -1.167856, -1.034810, -0.373645};
#endif
+}

#endif
