This repository has been archived by the owner on Aug 30, 2024. It is now read-only.

Commit

spell fix
Signed-off-by: Wenxin Zhang <[email protected]>
VincyZhang committed Feb 4, 2024
1 parent 29dfa35 commit 9c0e728
Showing 4 changed files with 9 additions and 5 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/scripts/formatScan/nlp_dict.txt
@@ -25,3 +25,7 @@ ue
 wya
 ser
 mone
+iterm
+tne
+aadd
+endianess
4 changes: 2 additions & 2 deletions neural_speed/core/layers/mha_dense.cpp
@@ -1345,7 +1345,7 @@ class mha_stable_interface_t {
 const auto group_heads = p.head_num / p.heads_kv;
 const auto sl_diff = p.sl_kv - p.sl_q;
 
-// TP will need the real rank oder of k
+// TP will need the real rank order of k
 int32_t k_offset = 0;
 int32_t log_head_num = p.head_num;
 #ifdef NS_TP_MODEL
@@ -1731,7 +1731,7 @@ void bestla_fusion_attn_forward_ref(const attn_fwd_args_t<Q_T, K_T, V_T, DST_T>&
 const auto ROWPACK = p.V_layout == ATTN_FWD_LAYOUT_NTILE48_ROWPACK4 ? 4
                    : p.V_layout == ATTN_FWD_LAYOUT_NTILE48_ROWPACK2 ? 2
                                                                     : 0;
-// TP will need the real rank oder of k
+// TP will need the real rank order of k
 int32_t k_offset = 0;
 int32_t log_head_num = p.head_num;
 #ifdef NS_TP_MODEL
4 changes: 2 additions & 2 deletions neural_speed/core/ne_layers.c
@@ -7707,7 +7707,7 @@ static void ne_compute_forward_alibi_f32(const struct ne_compute_params* params,
 assert(nb0 == sizeof(float));
 assert(ne1 + n_past == ne0);
 (void)n_past;
-// TP will need the real rank oder of k
+// TP will need the real rank order of k
 int32_t k_offset = 0;
 #ifdef NS_TP_MODEL
 parallel_context* p_ctx = init_parallel_context();
@@ -7777,7 +7777,7 @@ static void ne_compute_forward_alibi_f16(const struct ne_compute_params* params,
 assert(nb0 == sizeof(ne_fp16_t));
 assert(ne1 + n_past == ne0);
 (void)n_past;
-// TP will need the real rank oder of k
+// TP will need the real rank order of k
 int32_t k_offset = 0;
 #ifdef NS_TP_MODEL
 parallel_context* p_ctx = init_parallel_context();
2 changes: 1 addition & 1 deletion neural_speed/models/whisper/whisper.cpp
@@ -404,7 +404,7 @@ struct whisper_model_t {
 struct whisper_sequence_t {
 std::vector<whisper_token_data> tokens;
 
-// the accumulated transcription in the current interation (used to truncate
+// the accumulated transcription in the current interaction (used to truncate
 // the tokens array)
 int result_len;
 

