
Commit

Merge branch 'main' into fix_fsdp_with_fp8_in_trainer
eljandoubi authored Oct 25, 2024
2 parents 33902fd + e447185 commit cfd8152
Showing 2 changed files with 4 additions and 2 deletions.
3 changes: 2 additions & 1 deletion tests/quantization/bnb/test_4bit.py
@@ -29,6 +29,7 @@
     BitsAndBytesConfig,
     pipeline,
 )
+from transformers.models.opt.modeling_opt import OPTAttention
 from transformers.testing_utils import (
     apply_skip_if_not_implemented,
     is_bitsandbytes_available,
@@ -565,7 +566,7 @@ def test_training(self):
 
         # Step 2: add adapters
         for _, module in model.named_modules():
-            if "OPTAttention" in repr(type(module)):
+            if isinstance(module, OPTAttention):
                 module.q_proj = LoRALayer(module.q_proj, rank=16)
                 module.k_proj = LoRALayer(module.k_proj, rank=16)
                 module.v_proj = LoRALayer(module.v_proj, rank=16)
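For context, the change in both files replaces a substring match on repr(type(module)) with an isinstance check against the imported OPTAttention class. Below is a minimal, self-contained sketch of the difference; the subclass name is illustrative only (it stands in for attention-implementation variants and is not taken from this commit):

import torch.nn as nn


class OPTAttention(nn.Module):
    """Stand-in for transformers.models.opt.modeling_opt.OPTAttention."""


class OPTSdpaAttention(OPTAttention):
    """Hypothetical subclass representing an alternative attention implementation."""


module = OPTSdpaAttention()

# The old check misses subclasses whose class names do not contain "OPTAttention" verbatim.
print("OPTAttention" in repr(type(module)))  # False
# isinstance matches the base class and all of its subclasses.
print(isinstance(module, OPTAttention))      # True

The isinstance check keeps matching attention modules even when the concrete class name changes, as long as the class still inherits from OPTAttention.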
3 changes: 2 additions & 1 deletion tests/quantization/bnb/test_mixed_int8.py
@@ -29,6 +29,7 @@
     BitsAndBytesConfig,
     pipeline,
 )
+from transformers.models.opt.modeling_opt import OPTAttention
 from transformers.testing_utils import (
     apply_skip_if_not_implemented,
     is_accelerate_available,
@@ -868,7 +869,7 @@ def test_training(self):
 
         # Step 2: add adapters
         for _, module in model.named_modules():
-            if "OPTAttention" in repr(type(module)):
+            if isinstance(module, OPTAttention):
                 module.q_proj = LoRALayer(module.q_proj, rank=16)
                 module.k_proj = LoRALayer(module.k_proj, rank=16)
                 module.v_proj = LoRALayer(module.v_proj, rank=16)
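Both hunks wrap the attention projections with a LoRALayer helper that is defined elsewhere in these test files and is not shown in this diff. Purely for orientation, here is a minimal sketch of what such a wrapper typically looks like; the constructor and forward pass below are assumptions, not the tests' actual code:

import torch
import torch.nn as nn


class LoRALayer(nn.Module):
    """Sketch: wraps a frozen (possibly quantized) linear projection with a trainable low-rank update."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        # `module` is expected to expose in_features/out_features (nn.Linear or a bitsandbytes Linear).
        self.module = module  # original projection, kept frozen
        # Low-rank factors: x @ A @ B adds an update of rank `rank`.
        self.adapter_a = nn.Parameter(torch.randn(module.in_features, rank) * 0.01)
        self.adapter_b = nn.Parameter(torch.zeros(rank, module.out_features))

    def forward(self, x):
        return self.module(x) + x @ self.adapter_a @ self.adapter_b

In these training tests the pattern matters because the quantized base model stays frozen and only adapter parameters like these are expected to receive gradients.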
