diff --git a/examples/webgpu-embedding-benchmark/index.html b/examples/webgpu-embedding-benchmark/index.html
index 6ceeae4ab..8b4a9d361 100644
--- a/examples/webgpu-embedding-benchmark/index.html
+++ b/examples/webgpu-embedding-benchmark/index.html
@@ -9,7 +9,7 @@
This benchmark measures the execution time of BERT-based embedding models
diff --git a/examples/webgpu-video-background-removal/index.html b/examples/webgpu-video-background-removal/index.html
index 59e0a8428..8e71df5a9 100644
--- a/examples/webgpu-video-background-removal/index.html
+++ b/examples/webgpu-video-background-removal/index.html
@@ -10,7 +10,7 @@
Runs locally in your browser, powered by
diff --git a/examples/webgpu-video-depth-estimation/index.html b/examples/webgpu-video-depth-estimation/index.html
index 9dd633e54..c05574f67 100644
--- a/examples/webgpu-video-depth-estimation/index.html
+++ b/examples/webgpu-video-depth-estimation/index.html
@@ -14,7 +14,7 @@
diff --git a/package.json b/package.json
index 32594e90f..e41c3543a 100644
--- a/package.json
+++ b/package.json
@@ -37,11 +37,11 @@
"readme": "python ./docs/scripts/build_readme.py",
"docs-api": "node ./docs/scripts/generate.js",
"docs-preview": "doc-builder preview transformers.js ./docs/source/ --not_python_module",
- "docs-build": "doc-builder build transformers.js ./docs/source/ --not_python_module --build_dir ./docs/build/ --repo_owner xenova"
+ "docs-build": "doc-builder build transformers.js ./docs/source/ --not_python_module --build_dir ./docs/build/"
},
"repository": {
"type": "git",
- "url": "git+https://github.com/xenova/transformers.js.git"
+ "url": "git+https://github.com/huggingface/transformers.js.git"
},
"keywords": [
"transformers",
@@ -57,9 +57,9 @@
"author": "Hugging Face",
"license": "Apache-2.0",
"bugs": {
- "url": "https://github.com/xenova/transformers.js/issues"
+ "url": "https://github.com/huggingface/transformers.js/issues"
},
- "homepage": "https://github.com/xenova/transformers.js#readme",
+ "homepage": "https://github.com/huggingface/transformers.js#readme",
"dependencies": {
"@huggingface/jinja": "^0.3.0",
"onnxruntime-node": "1.19.2",
diff --git a/src/models.js b/src/models.js
index 85f9294c3..b7d2b0ee2 100644
--- a/src/models.js
+++ b/src/models.js
@@ -71,6 +71,10 @@ import {
getModelJSON,
} from './utils/hub.js';
+import {
+ GITHUB_ISSUE_URL,
+} from './utils/constants.js';
+
import {
LogitsProcessorList,
ForcedBOSTokenLogitsProcessor,
@@ -910,7 +914,7 @@ export class PreTrainedModel extends Callable {
} else { // should be MODEL_TYPES.EncoderOnly
if (modelType !== MODEL_TYPES.EncoderOnly) {
- console.warn(`Model type for '${modelName ?? config?.model_type}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`)
+ console.warn(`Model type for '${modelName ?? config?.model_type}' not found, assuming encoder-only architecture. Please report this at ${GITHUB_ISSUE_URL}.`)
}
info = await Promise.all([
constructSessions(pretrained_model_name_or_path, {
@@ -4897,7 +4901,7 @@ export class PyAnnoteModel extends PyAnnotePreTrainedModel { }
* **Example:** Load and run a `PyAnnoteForAudioFrameClassification` for speaker diarization.
*
* ```javascript
- * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@xenova/transformers';
+ * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
*
* // Load model and processor
* const model_id = 'onnx-community/pyannote-segmentation-3.0';
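The JSDoc hunk above shows only the changed import line. For context, here is a sketch of how such an example typically continues under the library's usual `from_pretrained` flow; the audio URL is a placeholder and the example body is assumed rather than taken from this diff:

```javascript
import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';

// Load model and processor (model id as in the JSDoc above)
const model_id = 'onnx-community/pyannote-segmentation-3.0';
const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);

// Read audio at the sampling rate the feature extractor expects
// (placeholder URL, not from this diff)
const audio = await read_audio('https://example.com/audio.wav', processor.feature_extractor.config.sampling_rate);
const inputs = await processor(audio);

// Run the model to get per-frame speaker logits
const { logits } = await model(inputs);
```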
diff --git a/src/tokenizers.js b/src/tokenizers.js
index d4c72242a..5b4e0170c 100644
--- a/src/tokenizers.js
+++ b/src/tokenizers.js
@@ -283,7 +283,7 @@ const PROBLEMATIC_REGEX_MAP = new Map([
["(?i:'s|'t|'re|'ve|'m|'ll|'d)", "(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"],
// Used to override the default (invalid) regex of the bloom pretokenizer.
- // For more information, see https://github.com/xenova/transformers.js/issues/94
+ // For more information, see https://github.com/huggingface/transformers.js/issues/94
[` ?[^(\\s|[${BLOOM_SPLIT_CHARS}])]+`, ` ?[^\\s${BLOOM_SPLIT_CHARS}]+`],
])
@@ -2577,7 +2577,7 @@ export class PreTrainedTokenizer extends Callable {
// Another slight hack to add `end_of_word_suffix` (if present) to the decoder
// This is needed for cases where BPE model and ByteLevel decoder are used
- // For more information, see https://github.com/xenova/transformers.js/issues/74
+ // For more information, see https://github.com/huggingface/transformers.js/issues/74
// TODO: save this to the decoder when exporting?
this.decoder.end_of_word_suffix = this.model.end_of_word_suffix;
}
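For readers unfamiliar with the `PROBLEMATIC_REGEX_MAP` hunk above: JavaScript's `RegExp` rejects inline-flag groups such as `(?i:...)`, so patterns copied verbatim from a tokenizer config would throw at compile time. The map substitutes a flag-free equivalent before compiling. A self-contained sketch of the idea, with hypothetical helper names:

```javascript
// JS throws on inline-flag groups: new RegExp("(?i:'s|'t)") -> SyntaxError.
// The map-based workaround swaps each problematic fragment for a flag-free
// equivalent before compiling. `fixups` and `compilePretokenizerRegex` are
// hypothetical names for illustration.
const fixups = new Map([
  ["(?i:'s|'t|'re|'ve|'m|'ll|'d)", "(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"],
]);

function compilePretokenizerRegex(pattern) {
  for (const [bad, good] of fixups) {
    pattern = pattern.replaceAll(bad, good);
  }
  return new RegExp(pattern, 'gu');
}

// The GPT-2-style contraction pattern now compiles and matches:
console.log("don't".match(compilePretokenizerRegex("(?i:'s|'t|'re|'ve|'m|'ll|'d)")));
// -> ["'t"]
```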
diff --git a/src/utils/constants.js b/src/utils/constants.js
index 7dff34b8d..9d0e9ee42 100644
--- a/src/utils/constants.js
+++ b/src/utils/constants.js
@@ -1,2 +1,2 @@
-export const GITHUB_ISSUE_URL = 'https://github.com/xenova/transformers.js/issues/new/choose';
\ No newline at end of file
+export const GITHUB_ISSUE_URL = 'https://github.com/huggingface/transformers.js/issues/new/choose';
\ No newline at end of file
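The net effect of the `constants.js` change, together with the `models.js` hunk above, is that the issue URL now lives in one module instead of being hard-coded at each call site, so a future org or repo move only touches one line. A sketch of the pattern; the helper function is hypothetical:

```javascript
import { GITHUB_ISSUE_URL } from './utils/constants.js';

// Hypothetical helper illustrating the shared-constant pattern from models.js:
// call sites interpolate the constant instead of hard-coding the URL.
function warnUnknownModelType(modelType) {
  console.warn(
    `Model type '${modelType}' not found, assuming encoder-only architecture. ` +
    `Please report this at ${GITHUB_ISSUE_URL}.`
  );
}
```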