Skip to content

Commit

Permalink
Fix the multiple typos in our internal task files
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 682328927
  • Loading branch information
MediaPipe Team authored and copybara-github committed Oct 4, 2024
1 parent a3596e3 commit 5193d0c
Show file tree
Hide file tree
Showing 6 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion mediapipe/tasks/python/genai/bundler/llm_bundler.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ class BundleConfig:
passed here.
start_token: Token that will be used to signify the beginning of a sequence.
stop_tokens: Tokens that will be used to signify the end of a sequence.
output_filename: Name of the generated `.task` file containg the Bundle.
output_filename: Name of the generated `.task` file containing the Bundle.
enable_bytes_to_unicode_mapping: Enables GPT-2 style bytes to unicode
mapping. For more details see:
https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9
Expand Down
2 changes: 1 addition & 1 deletion mediapipe/tasks/python/genai/converter/converter_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
class QuantizationAction:
"""Container of the tensor values and its corresponding quantization settings.
The contrainer is responsible for hosting all of the information that is
The container is responsible for hosting all of the information that is
required to execute the weight-only quantization.
Attributes:
Expand Down
2 changes: 1 addition & 1 deletion mediapipe/tasks/python/genai/converter/llm_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ def convert_bpe_vocab(vocab_model_file: str, output_dir: str) -> str:
if not os.path.isdir(vocab_model_file):
raise ValueError(
'The input BPE vocab model file path is expected to be a directory that'
' conatins both tokenizer.json and tokenizer_config.json files.'
' contains both tokenizer.json and tokenizer_config.json files.'
)
output_vocab_file = os.path.join(output_dir, 'spm.model')
model_ckpt_util.ConvertHfTokenizer(vocab_model_file, output_vocab_file)
Expand Down
6 changes: 3 additions & 3 deletions mediapipe/tasks/python/genai/converter/quantization_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ def get_best_bound(
p_value: float = 1.0,
per_channel: bool = False,
) -> JTensor:
"""Scan mutliple factors on max value to get best bound value.
"""Scan multiple factors on max value to get best bound value.
This does a scan to get bound value(s) that minimize mean absolute error (MAE)
between original tensor 't' and quantized tensor. It's (almost) equivalent to
Expand Down Expand Up @@ -239,7 +239,7 @@ def reduce_precision(
Args:
t: Input tensor.
contract_dims: Speficies contracting dimesnions of the input tensor.
contract_dims: Specifies contracting dimensions of the input tensor.
need_gradient: If gradient is needed out of this function.
bits: Target number of bits.
optimization_on_bound: If MAE bound optimizer is used.
Expand Down Expand Up @@ -405,7 +405,7 @@ def pack_4bit(
packed_dtype: Target type to pack to, int32 or int8.
Returns:
int32 or int8 packed tensor where the pack_dim size is dividened by 8
int32 or int8 packed tensor where the pack_dim size is divided by 8
from the original tensor x.
"""
x = jnp.asarray(x)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ export declare interface WebGpuOptions {

// TODO: b/327685206 - Fill Adapter info for LLM Web task
/**
* The information of WebGPU adapater, which will be used to optimize the
* The information of WebGPU adapter, which will be used to optimize the
* performance for LLM Inference task.
*/
adapterInfo?: GPUAdapterInfo;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ export class RagPipeline {
// We currently only use a single .wasm file and a single .data file (for
// the tasks that have to load assets). We need to revisit how we
// initialize the file locator if we ever need to differentiate between
// diffferent files.
// different files.
if (file.endsWith('.wasm')) {
return wasmFileset.wasmBinaryPath.toString();
} else if (wasmFileset.assetBinaryPath && file.endsWith('.data')) {
Expand Down

0 comments on commit 5193d0c

Please sign in to comment.