diff --git a/funasr/models/ct_transformer/model.py b/funasr/models/ct_transformer/model.py index 8c3f04327..330d7e554 100644 --- a/funasr/models/ct_transformer/model.py +++ b/funasr/models/ct_transformer/model.py @@ -333,12 +333,13 @@ def inference(self, elif new_mini_sentence[-1] == ",": new_mini_sentence_out = new_mini_sentence[:-1] + "." new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id] - elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "?" and len(new_mini_sentence[-1].encode())==0: + elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "?" and len(new_mini_sentence[-1].encode())!=1: new_mini_sentence_out = new_mini_sentence + "。" new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id] elif new_mini_sentence[-1] != "." and new_mini_sentence[-1] != "?" and len(new_mini_sentence[-1].encode())==1: new_mini_sentence_out = new_mini_sentence + "." new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id] + # keep a punctuation array for the punc segment if punc_array is None: punc_array = punctuations diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py index 9cd38547a..6f60dac43 100644 --- a/funasr/utils/load_utils.py +++ b/funasr/utils/load_utils.py @@ -39,7 +39,8 @@ def load_audio_text_image_video(data_or_path_or_list, fs: int = 16000, audio_fs: if isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list): # local file if data_type is None or data_type == "sound": data_or_path_or_list, audio_fs = torchaudio.load(data_or_path_or_list) - data_or_path_or_list = data_or_path_or_list[0, :] + if kwargs.get("reduce_channels", True): + data_or_path_or_list = data_or_path_or_list.mean(0) elif data_type == "text" and tokenizer is not None: data_or_path_or_list = tokenizer.encode(data_or_path_or_list) elif data_type == "image": # undo