diff --git a/docs/source/en/model_doc/llava_next.md b/docs/source/en/model_doc/llava_next.md
index ef74bf7e104ed5..a2a3913fcad7b8 100644
--- a/docs/source/en/model_doc/llava_next.md
+++ b/docs/source/en/model_doc/llava_next.md
@@ -98,7 +98,7 @@ print(processor.decode(output[0], skip_special_tokens=True))
 
 ### Quantization using Bitsandbytes
 
-The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes`` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with:
+The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with:
 
 ```python
 from transformers import LlavaNextForConditionalGeneration, BitsAndBytesConfig
diff --git a/docs/source/en/model_doc/seggpt.md b/docs/source/en/model_doc/seggpt.md
index 707be240174629..f821fc14a08c54 100644
--- a/docs/source/en/model_doc/seggpt.md
+++ b/docs/source/en/model_doc/seggpt.md
@@ -36,7 +36,7 @@
 import torch
 from datasets import load_dataset
 from transformers import SegGptImageProcessor, SegGptForImageSegmentation
 
-model_id = "BAAI/seggpt-vit-large"
+checkpoint = "BAAI/seggpt-vit-large"
 image_processor = SegGptImageProcessor.from_pretrained(checkpoint)
 model = SegGptForImageSegmentation.from_pretrained(checkpoint)
@@ -87,4 +87,4 @@ The original code can be found [here]([(https://github.com/baaivision/Painter/tr
 ## SegGptForImageSegmentation
 
 [[autodoc]] SegGptForImageSegmentation
-    - forward
\ No newline at end of file
+    - forward
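
Note on the llava_next hunk: the diff context cuts the quantization snippet off after its import line. A minimal sketch of the 4-bit loading pattern that snippet describes is shown below; the checkpoint name is an assumption (a published LLaVA-NeXT checkpoint), not taken from this diff, so substitute whichever checkpoint the surrounding doc actually uses.

```python
import torch
from transformers import LlavaNextForConditionalGeneration, BitsAndBytesConfig

# Assumed checkpoint name for illustration; not taken from this diff.
model_id = "llava-hf/llava-v1.6-mistral-7b-hf"

# Requires `pip install bitsandbytes` and a CUDA-compatible GPU.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

# Load the model with weights quantized to 4 bits.
model = LlavaNextForConditionalGeneration.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",
)
```

Passing `load_in_8bit=True` instead of the 4-bit options gives the 8-bit variant mentioned in the paragraph.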