From 5193d0c9c5c5baa3db95b66c5536387f6f45a847 Mon Sep 17 00:00:00 2001 From: MediaPipe Team Date: Fri, 4 Oct 2024 08:23:08 -0700 Subject: [PATCH] Fix the multiple typos in our internal task files PiperOrigin-RevId: 682328927 --- mediapipe/tasks/python/genai/bundler/llm_bundler.py | 2 +- mediapipe/tasks/python/genai/converter/converter_base.py | 2 +- mediapipe/tasks/python/genai/converter/llm_converter.py | 2 +- mediapipe/tasks/python/genai/converter/quantization_util.py | 6 +++--- .../web/genai/llm_inference/llm_inference_options.d.ts | 2 +- .../web/genai_experimental/rag_pipeline/rag_pipeline.ts | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mediapipe/tasks/python/genai/bundler/llm_bundler.py b/mediapipe/tasks/python/genai/bundler/llm_bundler.py index c199cbddb4..e8e2824a6c 100644 --- a/mediapipe/tasks/python/genai/bundler/llm_bundler.py +++ b/mediapipe/tasks/python/genai/bundler/llm_bundler.py @@ -34,7 +34,7 @@ class BundleConfig: passed here. start_token: Token that will be used to signify the beginning of a sequence. stop_tokens: Tokens that will be used to signify the end of a sequence. - output_filename: Name of the generated `.task` file containg the Bundle. + output_filename: Name of the generated `.task` file containing the Bundle. enable_bytes_to_unicode_mapping: Enables GPT-2 style bytes to unicode mapping. For more details see: https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9 diff --git a/mediapipe/tasks/python/genai/converter/converter_base.py b/mediapipe/tasks/python/genai/converter/converter_base.py index b759b12d56..50bb5e5f0a 100644 --- a/mediapipe/tasks/python/genai/converter/converter_base.py +++ b/mediapipe/tasks/python/genai/converter/converter_base.py @@ -23,7 +23,7 @@ class QuantizationAction: """Container of the tensor values and its corresponding quantization settings. 
- The contrainer is responsible for hosting all of the information that is + The container is responsible for hosting all of the information that is required to execute the weight-only quantization. Attributes: diff --git a/mediapipe/tasks/python/genai/converter/llm_converter.py b/mediapipe/tasks/python/genai/converter/llm_converter.py index 2c07501a29..cff7948b97 100644 --- a/mediapipe/tasks/python/genai/converter/llm_converter.py +++ b/mediapipe/tasks/python/genai/converter/llm_converter.py @@ -244,7 +244,7 @@ def convert_bpe_vocab(vocab_model_file: str, output_dir: str) -> str: if not os.path.isdir(vocab_model_file): raise ValueError( 'The input BPE vocab model file path is expected to be a directory that' - ' conatins both tokenizer.json and tokenizer_config.json files.' + ' contains both tokenizer.json and tokenizer_config.json files.' ) output_vocab_file = os.path.join(output_dir, 'spm.model') model_ckpt_util.ConvertHfTokenizer(vocab_model_file, output_vocab_file) diff --git a/mediapipe/tasks/python/genai/converter/quantization_util.py b/mediapipe/tasks/python/genai/converter/quantization_util.py index ed3e173048..46eb740242 100644 --- a/mediapipe/tasks/python/genai/converter/quantization_util.py +++ b/mediapipe/tasks/python/genai/converter/quantization_util.py @@ -162,7 +162,7 @@ def get_best_bound( p_value: float = 1.0, per_channel: bool = False, ) -> JTensor: - """Scan mutliple factors on max value to get best bound value. + """Scan multiple factors on max value to get best bound value. This does a scan to get bound value(s) that minimize mean absolute error (MAE) between original tensor 't' and quantized tensor. It's (almost) equivalent to @@ -239,7 +239,7 @@ def reduce_precision( Args: t: Input tensor. - contract_dims: Speficies contracting dimesnions of the input tensor. + contract_dims: Specifies contracting dimensions of the input tensor. need_gradient: If gradient is needed out of this function. bits: Target number of bits. 
optimization_on_bound: If MAE bound optimizer is used. @@ -405,7 +405,7 @@ def pack_4bit( packed_dtype: Target type to pack to, int32 or int8. Returns: - int32 or int8 packed tensor where the pack_dim size is dividened by 8 + int32 or int8 packed tensor where the pack_dim size is divided by 8 from the original tensor x. """ x = jnp.asarray(x) diff --git a/mediapipe/tasks/web/genai/llm_inference/llm_inference_options.d.ts b/mediapipe/tasks/web/genai/llm_inference/llm_inference_options.d.ts index 5df200aa80..5ec7914fa4 100644 --- a/mediapipe/tasks/web/genai/llm_inference/llm_inference_options.d.ts +++ b/mediapipe/tasks/web/genai/llm_inference/llm_inference_options.d.ts @@ -32,7 +32,7 @@ export declare interface WebGpuOptions { // TODO: b/327685206 - Fill Adapter infor for LLM Web task /** - * The information of WebGPU adapater, which will be used to optimize the + * The information of WebGPU adapter, which will be used to optimize the * performance for LLM Inference task. */ adapterInfo?: GPUAdapterInfo; diff --git a/mediapipe/tasks/web/genai_experimental/rag_pipeline/rag_pipeline.ts b/mediapipe/tasks/web/genai_experimental/rag_pipeline/rag_pipeline.ts index 4ab128af44..7792e90564 100644 --- a/mediapipe/tasks/web/genai_experimental/rag_pipeline/rag_pipeline.ts +++ b/mediapipe/tasks/web/genai_experimental/rag_pipeline/rag_pipeline.ts @@ -89,7 +89,7 @@ export class RagPipeline { // We currently only use a single .wasm file and a single .data file (for // the tasks that have to load assets). We need to revisit how we // initialize the file locator if we ever need to differentiate between - // diffferent files. + // different files. if (file.endsWith('.wasm')) { return wasmFileset.wasmBinaryPath.toString(); } else if (wasmFileset.assetBinaryPath && file.endsWith('.data')) {