diff --git a/backend/bgm_separation/router.py b/backend/bgm_separation/router.py
index 1473dcd..63f8b98 100644
--- a/backend/bgm_separation/router.py
+++ b/backend/bgm_separation/router.py
@@ -19,7 +19,7 @@
 
 
 @functools.lru_cache
-def init_bgm_separation_inferencer() -> 'MusicSeparator':
+def get_bgm_separation_inferencer() -> 'MusicSeparator':
     config = load_server_config()["bgm_separation"]
     inferencer = MusicSeparator()
     inferencer.update_model(
@@ -33,7 +33,7 @@ async def run_bgm_separation(
     audio: np.ndarray,
     params: BGMSeparationParams
 ) -> Tuple[np.ndarray, np.ndarray]:
-    instrumental, vocal, filepaths = init_bgm_separation_inferencer().separate(
+    instrumental, vocal, filepaths = get_bgm_separation_inferencer().separate(
         audio=audio,
         model_name=params.model_size,
         device=params.device,
diff --git a/backend/transcription/router.py b/backend/transcription/router.py
index 79e393d..9d447e2 100644
--- a/backend/transcription/router.py
+++ b/backend/transcription/router.py
@@ -19,7 +19,7 @@
 
 
 @functools.lru_cache
-def init_pipeline() -> 'FasterWhisperInference':
+def get_pipeline() -> 'FasterWhisperInference':
     config = load_server_config()["whisper"]
     inferencer = FasterWhisperInference()
     inferencer.update_model(
@@ -33,7 +33,7 @@ async def run_transcription(
     audio: np.ndarray,
     params: TranscriptionPipelineParams
 ) -> List[Segment]:
-    segments, elapsed_time = init_pipeline().run(
+    segments, elapsed_time = get_pipeline().run(
         audio=audio,
         progress=gr.Progress(),
         add_timestamp=False,
diff --git a/backend/vad/router.py b/backend/vad/router.py
index adad881..b6bdde7 100644
--- a/backend/vad/router.py
+++ b/backend/vad/router.py
@@ -17,7 +17,7 @@
 
 
 @functools.lru_cache
-def init_vad_model() -> SileroVAD:
+def get_vad_model() -> SileroVAD:
     inferencer = SileroVAD()
     inferencer.update_model()
     return inferencer
@@ -27,7 +27,7 @@ async def run_vad(
     audio: np.ndarray,
     params: VadOptions
 ) -> List[Dict]:
-    audio, speech_chunks = init_vad_model().run(
+    audio, speech_chunks = get_vad_model().run(
         audio=audio,
         vad_parameters=params
     )