This repository has been archived by the owner on Jan 11, 2022. It is now read-only.

Added fp16 PyTorch support #85

Open · wants to merge 1 commit into master
9 changes: 9 additions & 0 deletions nv_wavenet.cuh
@@ -285,6 +285,9 @@ class nvWavenetInfer {
     void setActivation(float* dst, float* src, size_t size) {
         gpuErrChk(cudaMemcpy(dst, src, size*sizeof(float), cudaMemcpyDefault));
     }
+    void setActivation(half* dst, half* src, size_t size) {
+        gpuErrChk(cudaMemcpy(dst, src, size*sizeof(half), cudaMemcpyDefault));
+    }
     void setActivation(half* dst, float* src, size_t size) {
         convert_float2half(dst, src, size);
     }
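The convert_float2half helper called by the existing half-from-float overload is defined elsewhere in the tree and is not part of this diff. As a rough, hypothetical sketch of what such a helper amounts to (assuming the source buffer is device-visible, consistent with the cudaMemcpyDefault copies above):

#include <cuda_fp16.h>

// Hypothetical sketch, not the repository's actual implementation:
// convert n floats to halfs, one thread per element.
__global__ void float2half_kernel(half* dst, const float* src, size_t n) {
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = __float2half(src[i]);
}

void convert_float2half(half* dst, float* src, size_t size) {
    const int block = 256;
    const int grid = (int)((size + block - 1) / block);
    float2half_kernel<<<grid, block>>>(dst, src, size);
    gpuErrChk(cudaGetLastError()); // gpuErrChk is the error-check macro used throughout the repo
}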
@@ -419,6 +422,12 @@ class nvWavenetInfer {
         setActivation(m_Lh, Lh, m_maxSamples*m_numLayers*m_maxBatch*2*R);
         gpuErrChk(cudaMemcpy(m_outputSelectors, outputSelectors, m_maxSamples*m_maxBatch*sizeof(float), cudaMemcpyHostToDevice));
 
     }
+    void setInputs (half* Lh, float* outputSelectors) {
+        silenceInputs<<<1,256>>>(m_yInPrev, m_yInCur, m_maxBatch);
+        setActivation(m_Lh, Lh, m_maxSamples*m_numLayers*m_maxBatch*2*R);
+        gpuErrChk(cudaMemcpy(m_outputSelectors, outputSelectors, m_maxSamples*m_maxBatch*sizeof(float), cudaMemcpyHostToDevice));
+
+    }
 
     void getXtOut(int layer, float* hXt) { getActivation(hXt, m_XtOut + layer*m_maxBatch*R, m_maxBatch*R); }
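Note that even on the fp16 path only the conditioning activations (Lh) are stored as half: both setInputs overloads still copy the output selectors as float.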
47 changes: 37 additions & 10 deletions pytorch/wavenet_infer.cu
@@ -36,8 +36,11 @@ const int A = 256;
 const int R = 64;
 const int S = 256;
 typedef nvWavenetInfer<float,float, R, S, A> MyWaveNet;
+typedef nvWavenetInfer<half2,half, R, S, A> MyWaveNet_half;
 
-std::shared_ptr<MyWaveNet> make_wavenet(int sample_count,
+
+template<typename WaveNetType>
+std::shared_ptr<WaveNetType> make_wavenet(int sample_count,
                                         int batch_size,
                                         float* embedding_prev,
                                         float* embedding_curr,
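The fp16 instantiation pairs half2 weights with half activations, presumably so the kernels can operate on packed pairs of fp16 values; from the caller's perspective only the conditioning-input type changes.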
@@ -55,7 +58,7 @@ std::shared_ptr<MyWaveNet> make_wavenet(int sample_count,
                                         bool use_embed_tanh,
                                         int implementation
                                         ) {
-    std::shared_ptr<MyWaveNet> wavenet(new MyWaveNet(num_layers, max_dilation,
+    std::shared_ptr<WaveNetType> wavenet(new WaveNetType(num_layers, max_dilation,
                                         batch_size, sample_count,
                                         implementation,
                                         use_embed_tanh));
@@ -84,14 +87,15 @@ std::shared_ptr<MyWaveNet> make_wavenet(int sample_count,
     return wavenet;
 }
 
-void infer(std::shared_ptr<MyWaveNet> wavenet,
-           float* input_features,
+template<typename WaveNetType, typename T_data>
+void infer(std::shared_ptr<WaveNetType> wavenet,
+           void* input_features,
            int* samples,
            int sample_count,
            int batch_size) {
     Matrix outputSelectors(batch_size, sample_count);
     outputSelectors.randomize(0.5,1.0);
-    wavenet->setInputs(input_features, outputSelectors.data());
+    wavenet->setInputs((T_data*)input_features, outputSelectors.data());
 
     int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1;
     assert(wavenet->run(sample_count, batch_size, samples, batch_size_per_block, true));
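For reference, the batch_size_per_block heuristic above picks the largest of 4, 2, or 1 that evenly divides the batch, so a batch of 8 runs 4 per block, 6 runs 2, and 5 falls back to 1.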
@@ -118,10 +122,12 @@ void wavenet_infer(int sample_count,
                    float* conv_out_weight,
                    float* conv_end_weight,
                    int use_embed_tanh,
-                   float* cond_input,
+                   void* cond_input, bool cond_half,
                    int implementation,
                    int* samples) {
-    std::shared_ptr<MyWaveNet> wavenet = make_wavenet(sample_count,
+    assert(samples);
+    if (cond_half) {
+        std::shared_ptr<MyWaveNet_half> wavenet_half = make_wavenet<MyWaveNet_half>(sample_count,
                    batch_size,
                    embedding_prev,
                    embedding_curr,
@@ -139,9 +145,30 @@ void wavenet_infer(int sample_count,
                    use_embed_tanh,
                    implementation
                    );
-    assert(samples);
-    infer(wavenet, cond_input, samples, sample_count, batch_size);
-    return;
+        infer<MyWaveNet_half, half>(wavenet_half, cond_input, samples, sample_count, batch_size);
+
+    } else {
+        std::shared_ptr<MyWaveNet> wavenet = make_wavenet<MyWaveNet>(sample_count,
+                   batch_size,
+                   embedding_prev,
+                   embedding_curr,
+                   num_layers,
+                   max_dilation,
+                   in_layer_weights_prev,
+                   in_layer_weights_curr,
+                   in_layer_biases,
+                   res_layer_weights,
+                   res_layer_biases,
+                   skip_layer_weights,
+                   skip_layer_biases,
+                   conv_out_weight,
+                   conv_end_weight,
+                   use_embed_tanh,
+                   implementation
+                   );
+        infer<MyWaveNet, float>(wavenet, cond_input, samples, sample_count, batch_size);
+    }
+
 }
 
 int get_R() {return R;}
3 changes: 2 additions & 1 deletion pytorch/wavenet_infer.h
@@ -30,6 +30,7 @@ extern "C" {
 // ------------------------------------------------
 // C-compatible function for wrapper
 // ------------------------------------------------
+
 void wavenet_infer(int sample_count,
                    int batch_size,
                    float* embedding_prev,
@@ -46,7 +47,7 @@ void wavenet_infer(int sample_count,
                    float* conv_out_weight,
                    float* conv_end_weight,
                    int use_embed_tanh,
-                   float* cond_input,
+                   void* cond_input, bool cond_half,
                    int implementation,
                    int* samples);
 
11 changes: 9 additions & 2 deletions pytorch/wavenet_infer_wrapper.cpp
@@ -48,7 +48,14 @@ int infer(at::Tensor samples_tensor,
     float* embedding_curr = embed_curr_tensor.data<float>();
     float* conv_out = conv_out_tensor.data<float>();
     float* conv_end = conv_end_tensor.data<float>();
-    float* cond_input = cond_input_tensor.data<float>();
+    void* cond_input;
+    bool cond_half = false;
+    if (cond_input_tensor.dtype() == at::kHalf) {
+        cond_input = (void*)cond_input_tensor.data_ptr();
+        cond_half = true;
+    } else {
+        cond_input = (void*)cond_input_tensor.data<float>();
+    }
 
     float** in_layer_weights_prev = (float**)malloc(num_layers*sizeof(float*));
     float** in_layer_weights_curr = (float**)malloc(num_layers*sizeof(float*));
@@ -84,7 +91,7 @@ int infer(at::Tensor samples_tensor,
                 conv_out,
                 conv_end,
                 use_embed_tanh,
-                cond_input,
+                cond_input, cond_half,
                 implementation,
                 samples);
 
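With the wrapper change above, the fp16 path is selected purely by the dtype of the conditioning tensor: a caller would presumably just cast it first, e.g. cond_input_tensor.to(at::kHalf) on the ATen side or tensor.half() in Python, and leave every other argument unchanged.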