Merge pull request #4 from aloeliger/namespaceTypesAndWeights
Adds namespace protections for main functions
aloeliger authored Mar 22, 2024
2 parents 8b05ed3 + ed64d78 commit 514adcb
Showing 17 changed files with 243 additions and 234 deletions. (Seven of the changed files are reproduced below.)
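The pattern is the same in every file below: each firmware version stops injecting its symbols into the global namespace with "using namespace CICADA_vX;" and instead wraps its top-level function (and, per the branch name, its types and weights) in a version-specific namespace, so several CICADA versions can be linked into one binary without symbol collisions. A minimal caller sketch follows; the surrounding code is hypothetical (not part of this commit) and assumes the typedefs from defines.h were moved into the same namespace by one of the changed files not reproduced below.

    #include "CICADA_v1/myproject.h"   // hypothetical include path

    int main() {
        // With the "using namespace" gone from the header, callers qualify
        // the symbols explicitly. N_INPUT_1_1 and N_LAYER_6 are preprocessor
        // defines from defines.h, so they stay unqualified.
        CICADA_v1::input_t  inputs[N_INPUT_1_1] = {};
        CICADA_v1::result_t score[N_LAYER_6];
        CICADA_v1::myproject(inputs, score);
        return 0;
    }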
41 changes: 21 additions & 20 deletions CICADA_v1/myproject.cpp
@@ -21,30 +21,30 @@
 #include "myproject.h"
 #include "parameters.h"
 
-using namespace CICADA_v1;
+namespace CICADA_v1{
 
 void myproject(
     input_t Inputs[N_INPUT_1_1],
     result_t layer6_out[N_LAYER_6]
 ) {
 
     //hls-fpga-machine-learning insert IO
     #pragma HLS ARRAY_RESHAPE variable=Inputs complete dim=0
     #pragma HLS ARRAY_PARTITION variable=layer6_out complete dim=0
     #pragma HLS INTERFACE ap_vld port=Inputs,layer6_out
     #pragma HLS PIPELINE
 
 #ifdef LOAD_WEIGHTS_FROM_TXT
     static bool loaded_weights = false;
     if (!loaded_weights) {
         //hls-fpga-machine-learning insert load weights
         nnet::load_weights_from_txt<weight2_t, 3780>(w2, "w2.txt");
         nnet::load_weights_from_txt<bias2_t, 15>(b2, "b2.txt");
         nnet::load_weights_from_txt<qbn1_scale_t, 15>(s4, "s4.txt");
         nnet::load_weights_from_txt<qbn1_bias_t, 15>(b4, "b4.txt");
         nnet::load_weights_from_txt<weight6_t, 15>(w6, "w6.txt");
         nnet::load_weights_from_txt<bias6_t, 1>(b6, "b6.txt");
         loaded_weights = true;
     }
 #endif

@@ -55,17 +55,18 @@ void myproject(
     //hls-fpga-machine-learning insert layers
 
     layer2_t layer2_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer2_out complete dim=0
     nnet::dense<input_t, layer2_t, config2>(Inputs, layer2_out, w2, b2); // dense1
 
     layer4_t layer4_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer4_out complete dim=0
     nnet::normalize<layer2_t, layer4_t, config4>(layer2_out, layer4_out, s4, b4); // QBN1
 
     layer5_t layer5_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer5_out complete dim=0
     nnet::relu<layer4_t, layer5_t, relu_config5>(layer4_out, layer5_out); // relu1
 
     nnet::dense<layer5_t, result_t, config6>(layer5_out, layer6_out, w6, b6); // output
 
 }
+}
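For context, the LOAD_WEIGHTS_FROM_TXT block above is hls4ml's C-simulation path: on the first call the trained weights are read from weights/*.txt into the weight arrays, and the #ifdef keeps that file I/O out of synthesis. The real loader is nnet::load_weights_from_txt from hls4ml's nnet_helpers.h; the sketch below is a simplified stand-in showing the shape of the operation, not the library implementation.

    #include <cstddef>
    #include <cstdlib>
    #include <fstream>
    #include <sstream>
    #include <string>

    // Simplified stand-in for an hls4ml-style weight loader: reads SIZE
    // comma-separated values from a text file into a pre-declared array.
    template <class T, std::size_t SIZE>
    void load_weights_from_txt(T *w, const std::string &fname) {
        std::ifstream in(fname.c_str());
        if (!in.is_open())
            std::exit(1);                       // the real helper reports the bad path
        std::string line, tok;
        std::getline(in, line);                 // all values sit on one line
        std::stringstream ss(line);
        std::size_t i = 0;
        while (std::getline(ss, tok, ',') && i < SIZE)
            w[i++] = T(std::atof(tok.c_str())); // text -> (fixed-point) weight type
    }

The static loaded_weights guard ensures the files are parsed only once per process, no matter how many events the testbench pushes through the network.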
14 changes: 7 additions & 7 deletions CICADA_v1/myproject.h
@@ -26,11 +26,11 @@
 
 #include "defines.h"
 
-using namespace CICADA_v1;
+namespace CICADA_v1{
 // Prototype of top level function for C-synthesis
 void myproject(
     input_t Inputs[N_INPUT_1_1],
     result_t layer6_out[N_LAYER_6]
 );
+}
 #endif
41 changes: 21 additions & 20 deletions CICADA_v1p1/myproject.cpp
@@ -21,30 +21,30 @@
 #include "myproject.h"
 #include "parameters.h"
 
-using namespace CICADA_v1p1;
+namespace CICADA_v1p1{
 
 void myproject(
     input_t Inputs[N_INPUT_1_1],
     result_t layer6_out[N_LAYER_6]
 ) {
 
     //hls-fpga-machine-learning insert IO
     #pragma HLS ARRAY_RESHAPE variable=Inputs complete dim=0
     #pragma HLS ARRAY_PARTITION variable=layer6_out complete dim=0
     #pragma HLS INTERFACE ap_vld port=Inputs,layer6_out
     #pragma HLS PIPELINE
 
 #ifdef LOAD_WEIGHTS_FROM_TXT
     static bool loaded_weights = false;
     if (!loaded_weights) {
         //hls-fpga-machine-learning insert load weights
         nnet::load_weights_from_txt<weight2_t, 3780>(w2, "w2.txt");
         nnet::load_weights_from_txt<bias2_t, 15>(b2, "b2.txt");
         nnet::load_weights_from_txt<qbn1_scale_t, 15>(s4, "s4.txt");
         nnet::load_weights_from_txt<qbn1_bias_t, 15>(b4, "b4.txt");
         nnet::load_weights_from_txt<weight6_t, 15>(w6, "w6.txt");
         nnet::load_weights_from_txt<bias6_t, 1>(b6, "b6.txt");
         loaded_weights = true;
     }
 #endif

@@ -55,17 +55,18 @@ void myproject(
     //hls-fpga-machine-learning insert layers
 
     layer2_t layer2_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer2_out complete dim=0
     nnet::dense<input_t, layer2_t, config2>(Inputs, layer2_out, w2, b2); // dense1
 
     layer4_t layer4_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer4_out complete dim=0
     nnet::normalize<layer2_t, layer4_t, config4>(layer2_out, layer4_out, s4, b4); // QBN1
 
     layer5_t layer5_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer5_out complete dim=0
     nnet::relu<layer4_t, layer5_t, relu_config5>(layer4_out, layer5_out); // relu1
 
     nnet::dense<layer5_t, result_t, config6>(layer5_out, layer6_out, w6, b6); // output
 
 }
+}
14 changes: 7 additions & 7 deletions CICADA_v1p1/myproject.h
@@ -26,12 +26,12 @@
 
 #include "defines.h"
 
-using namespace CICADA_v1p1;
+namespace CICADA_v1p1{
 
 // Prototype of top level function for C-synthesis
 void myproject(
     input_t Inputs[N_INPUT_1_1],
     result_t layer6_out[N_LAYER_6]
 );
+}
 #endif
37 changes: 19 additions & 18 deletions CICADA_v1p1p1/cicada.cpp
@@ -3,27 +3,27 @@
 #include "cicada.h"
 #include "parameters.h"
 
-using namespace CICADA_v1p1p1;
+namespace CICADA_v1p1p1{
 
 void cicada_v1p1p1(
     input_t inputs_[N_INPUT_1_1],
     result_t layer8_out[N_LAYER_6]
 ) {
 
     // hls-fpga-machine-learning insert IO
     #pragma HLS INLINE
 
 #ifdef LOAD_WEIGHTS_FROM_TEXT
     static bool loaded_weights = false;
     if (!loaded_weights) {
         // hls-fpga-machine-learning insert load weights
         nnet::load_weights_from_txt<weight2_t, 3780>(w2, "w2.txt");
         nnet::load_weights_from_txt<bias2_t, 15>(b2, "b2.txt");
         nnet::load_weights_from_txt<qbn1_scale_t, 15>(s4, "s4.txt");
         nnet::load_weights_from_txt<qbn1_bias_t, 15>(b4, "b4.txt");
         nnet::load_weights_from_txt<weight6_t, 15>(w6, "w6.txt");
         nnet::load_weights_from_txt<bias6_t, 1>(b6, "b6.txt");
         loaded_weights = true;
     }
 #endif

@@ -34,21 +34,22 @@ void cicada_v1p1p1(
     // hls-fpga-machine-learning insert layers
 
     layer2_t layer2_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer2_out complete dim=0
     nnet::dense<input_t, layer2_t, config2>(inputs_, layer2_out, w2, b2); // dense1
 
     layer4_t layer4_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer4_out complete dim=0
     nnet::normalize<layer2_t, layer4_t, config4>(layer2_out, layer4_out, s4, b4); // QBN1
 
     layer5_t layer5_out[N_LAYER_2];
     #pragma HLS ARRAY_PARTITION variable=layer5_out complete dim=0
     nnet::relu<layer4_t, layer5_t, relu_config5>(layer4_out, layer5_out); // relu1
 
     layer6_t layer6_out[N_LAYER_6];
     #pragma HLS ARRAY_PARTITION variable=layer6_out complete dim=0
     nnet::dense<layer5_t, layer6_t, config6>(layer5_out, layer6_out, w6, b6); // output
 
     nnet::relu<layer6_t, result_t, relu_config8>(layer6_out, layer8_out); // outputs
 
 }
+}
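Note the difference from the v1/v1p1 tops above: cicada_v1p1p1 carries only #pragma HLS INLINE and no interface or pipeline pragmas, so it is meant to be instantiated inside an outer top-level function rather than synthesized standalone. A hypothetical wrapper (cicada_top and its pragmas are assumptions for illustration, not part of this commit) could look like:

    #include "cicada.h"

    // Hypothetical top level: interface and pipeline pragmas live out here,
    // and the namespaced, INLINE-marked model function dissolves into this
    // context during synthesis.
    void cicada_top(CICADA_v1p1p1::input_t in[N_INPUT_1_1],
                    CICADA_v1p1p1::result_t out[N_LAYER_6]) {
        #pragma HLS ARRAY_RESHAPE variable=in complete dim=0
        #pragma HLS ARRAY_PARTITION variable=out complete dim=0
        #pragma HLS INTERFACE ap_vld port=in,out
        #pragma HLS PIPELINE
        CICADA_v1p1p1::cicada_v1p1p1(in, out);
    }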
14 changes: 7 additions & 7 deletions CICADA_v1p1p1/cicada.h
@@ -7,12 +7,12 @@
 
 #include "defines.h"
 
-using namespace CICADA_v1p1p1;
+namespace CICADA_v1p1p1{
 
 // Prototype of top level function for C-synthesis
 void cicada_v1p1p1(
     input_t inputs_[N_INPUT_1_1],
     result_t layer8_out[N_LAYER_6]
 );
+}
 #endif
41 changes: 21 additions & 20 deletions CICADA_v1p1p1/parameters.h
@@ -23,9 +23,10 @@
 #include "weights/w6.h"
 #include "weights/b6.h"
 
+namespace CICADA_v1p1p1{
 // hls-fpga-machine-learning insert layer-config
 // dense1
 struct config2 : nnet::dense_config {
     static const unsigned n_in = 252;
     static const unsigned n_out = 15;
     static const unsigned io_type = nnet::io_parallel;
@@ -40,11 +41,11 @@ struct config2 : nnet::dense_config {
     typedef weight2_t weight_t;
     typedef layer2_index index_t;
     template<class x_T, class y_T>
     using product = nnet::product::mult<x_T, y_T>;
 };
 
 // QBN1
 struct config4 : nnet::batchnorm_config {
     static const unsigned n_in = N_LAYER_2;
     static const unsigned n_filt = -1;
     static const unsigned n_scale_bias = (n_filt == -1) ? n_in : n_filt;
@@ -55,20 +56,20 @@ struct config4 : nnet::batchnorm_config {
     typedef qbn1_bias_t bias_t;
     typedef qbn1_scale_t scale_t;
     template<class x_T, class y_T>
     using product = nnet::product::mult<x_T, y_T>;
 };
 
 // relu1
 struct relu_config5 : nnet::activ_config {
     static const unsigned n_in = 15;
     static const unsigned table_size = 1024;
     static const unsigned io_type = nnet::io_parallel;
     static const unsigned reuse_factor = 1;
     typedef relu1_table_t table_t;
 };
 
 // output
 struct config6 : nnet::dense_config {
     static const unsigned n_in = 15;
     static const unsigned n_out = 1;
     static const unsigned io_type = nnet::io_parallel;
@@ -83,17 +84,17 @@ struct config6 : nnet::dense_config {
     typedef weight6_t weight_t;
     typedef layer6_index index_t;
     template<class x_T, class y_T>
     using product = nnet::product::mult<x_T, y_T>;
 };
 
 // outputs
 struct relu_config8 : nnet::activ_config {
     static const unsigned n_in = 1;
     static const unsigned table_size = 1024;
     static const unsigned io_type = nnet::io_parallel;
     static const unsigned reuse_factor = 1;
     typedef outputs_table_t table_t;
 };
+}
 
 #endif
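These structs are hls4ml's compile-time configuration mechanism: each nnet layer template takes a config struct as a template argument and pulls dimensions, reuse factor, and the weight/bias/accumulator types from it, which is why moving the structs into CICADA_v1p1p1 is enough to keep the three versions' configs from colliding. The sketch below shows the consuming side; it is simplified from the real nnet::dense (no reuse_factor handling, a plain multiply instead of the CONFIG_T::product functor) and assumes accum_t and bias_t typedefs that sit in the truncated lines above or come from the nnet::dense_config base.

    // Sketch: an nnet-style dense layer parameterized by a config struct.
    template <class data_T, class res_T, typename CONFIG_T>
    void dense_sketch(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out],
                      const typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out],
                      const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) {
        for (unsigned j = 0; j < CONFIG_T::n_out; j++) {
            typename CONFIG_T::accum_t acc = biases[j];       // widened accumulator
            for (unsigned i = 0; i < CONFIG_T::n_in; i++)
                acc += data[i] * weights[i * CONFIG_T::n_out + j];
            res[j] = res_T(acc);                              // cast to the layer output type
        }
    }

    // Instantiated the same way as the calls in the .cpp files above:
    //   dense_sketch<input_t, layer2_t, CICADA_v1p1p1::config2>(inputs, layer2_out, w2, b2);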
