
Commit 18a28f6
Bump version to 0.14.0.dev0 (#1587)
irenedea authored Oct 14, 2024
1 parent 9b76532 commit 18a28f6
Showing 6 changed files with 10 additions and 163 deletions.
llmfoundry/_version.py (1 addition, 1 deletion)
@@ -3,4 +3,4 @@
 
 """The LLM Foundry Version."""
 
-__version__ = '0.13.0.dev0'
+__version__ = '0.14.0.dev0'
llmfoundry/command_utils/eval.py (3 additions, 20 deletions)
@@ -4,7 +4,6 @@
 import logging
 import os
 import time
-import warnings
 from typing import Any, Optional, Union
 
 import pandas as pd
@@ -37,7 +36,6 @@
     process_init_device,
 )
from llmfoundry.utils.registry_utils import import_file
-from llmfoundry.utils.warnings import VersionedDeprecationWarning
 
 log = logging.getLogger(__name__)
 

@@ -63,7 +61,6 @@ def evaluate_model(
     callback_configs: Optional[dict[str, Any]],
     metadata: Optional[dict[str, str]],
     logged_config: dict[str, Any],
-    fsdp_config: Optional[dict[str, Any]] = None,
     parallelism_config: Optional[dict[str, Any]] = None,
     should_log_config: bool = True,
     load_path: Optional[str] = None,
@@ -78,18 +75,6 @@ def evaluate_model(
             'parallelism_config cannot contain deprecated fsdp_config arguments.',
         )
 
-    if fsdp_config:
-        warnings.warn(
-            VersionedDeprecationWarning(
-                'The argument fsdp_config is deprecated. Please use parallelism_config instead.',
-                remove_version='0.14.0',
-            ),
-        )
-    if fsdp_config and parallelism_config:
-        raise ValueError(
-            'Both fsdp_config and parallelism_config cannot be provided at the same time. Please use parallelism_config.',
-        )
-
     log.info(f'Evaluating model: {model_name}')
     # Build tokenizer and model
     tokenizer_cfg = tokenizer
@@ -127,7 +112,7 @@ def evaluate_model(
     fsdp_config = parallelism_config.get(
         'fsdp_config',
         None,
-    ) if parallelism_config else fsdp_config
+    ) if parallelism_config else None
     if fsdp_config and model.get('load_in_8bit', False):
         raise ValueError(
             'The FSDP config block is not supported when loading ' +
@@ -175,7 +160,7 @@ def evaluate_model(
         callbacks=callbacks,
         loggers=loggers,
         precision=precision,
-        parallelism_config={'fsdp': fsdp_config},
+        parallelism_config=parallelism_config,
         load_path=load_path,
         load_weights_only=True,
         progress_bar=False,
@@ -268,8 +253,6 @@ def evaluate(cfg: DictConfig) -> tuple[list[Trainer], pd.DataFrame]:
    model_configs = eval_config.models
    eval_gauntlet_config = eval_config.eval_gauntlet or eval_config.eval_gauntlet_str
 
-    fsdp_config = eval_config.fsdp_config
-
    # Mandatory Evaluation Parameters
    icl_tasks = eval_config.icl_tasks or eval_config.icl_tasks_str
    if icl_tasks is None:
@@ -345,9 +328,9 @@ def evaluate(cfg: DictConfig) -> tuple[list[Trainer], pd.DataFrame]:
            device_eval_batch_size=eval_config.device_eval_batch_size,
            eval_gauntlet_config=eval_gauntlet_config,
            eval_loader_config=eval_loader_config,
-            fsdp_config=fsdp_config,
            loggers=loggers,
            python_log_level=eval_config.python_log_level,
+            parallelism_config={'fsdp': eval_config.fsdp_config},
            precision=eval_config.precision,
            eval_gauntlet_df=eval_gauntlet_df,
            callback_configs=eval_config.callbacks,
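
Note for direct callers of evaluate_model: the removed fsdp_config argument now travels under the 'fsdp' key of parallelism_config. YAML-driven evaluation is unaffected, since evaluate still reads eval_config.fsdp_config and wraps it, as the hunk above shows. A minimal call-site sketch, where common_args is a hypothetical stand-in for the remaining required arguments and the FSDP options are illustrative:

from llmfoundry.command_utils.eval import evaluate_model

# Before this commit (argument removed):
#   evaluate_model(**common_args, fsdp_config={'sharding_strategy': 'FULL_SHARD'})
#
# After: pass the same options under the 'fsdp' key of parallelism_config.
evaluate_model(
    **common_args,  # hypothetical placeholder for the other required kwargs
    parallelism_config={'fsdp': {'sharding_strategy': 'FULL_SHARD'}},
)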
llmfoundry/models/hf/__init__.py (0 additions, 2 deletions)
@@ -9,7 +9,6 @@
    prepare_hf_model_for_fsdp,
 )
 from llmfoundry.models.hf.hf_t5 import ComposerHFT5
-from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithFSDP
 
 __all__ = [
     'BaseHuggingFaceModel',
@@ -18,5 +17,4 @@
     'prepare_hf_causal_lm_model_for_fsdp',
     'prepare_hf_enc_dec_model_for_fsdp',
     'prepare_hf_model_for_fsdp',
-    'HuggingFaceModelWithFSDP',
 ]
llmfoundry/models/hf/model_wrapper.py (0 additions, 103 deletions)

This file was deleted.
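
Call sites that used the deleted wrapper switch to Composer's HuggingFaceModel, as the test changes below show. A minimal sketch of the substitution, assuming model and tokenizer are an already-constructed transformers model and tokenizer:

from composer.models.huggingface import HuggingFaceModel

# Wrap the transformers model for Composer directly; shift_labels=True
# mirrors the causal-LM usage in the updated tests.
composer_model = HuggingFaceModel(model, tokenizer, shift_labels=True)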

tests/eval/test_eval_deprecation.py (0 additions, 33 deletions)
@@ -90,36 +90,3 @@ def test_deprecation_warning_with_deprecated_arg(self):
             'parallelism_config cannot contain deprecated fsdp_config arguments.',
             str(context.exception),
         )
-
-    def test_deprecation_warning_with_fsdp_config(self):
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always')
-
-            try:
-                evaluate_model(
-                    **self.common_args,
-                    parallelism_config=None,
-                    fsdp_config={'verbose': True},
-                )
-            except Exception:
-                pass
-
-            self.assertTrue(
-                any(
-                    issubclass(warning.category, VersionedDeprecationWarning)
-                    for warning in w
-                ),
-            )
-
-    def test_error_with_both_fsdp_and_parallelism_config(self):
-        with self.assertRaises(ValueError) as context:
-            evaluate_model(
-                **self.common_args,
-                parallelism_config={'some_arg': True},
-                fsdp_config={'some_arg': True},
-            )
-
-        self.assertIn(
-            'Both fsdp_config and parallelism_config cannot be provided at the same time.',
-            str(context.exception),
-        )
tests/models/test_model.py (6 additions, 4 deletions)
@@ -15,7 +15,10 @@
 from accelerate import init_empty_weights
 from composer.core.precision import Precision, get_precision_context
 from composer.distributed.dist_strategy import prepare_fsdp_module
-from composer.models.huggingface import maybe_get_underlying_model
+from composer.models.huggingface import (
+    HuggingFaceModel,
+    maybe_get_underlying_model,
+)
 from composer.optim import DecoupledAdamW
 from composer.utils import (
     FSDPConfig,
@@ -39,7 +42,6 @@
 
 from llmfoundry import ComposerHFCausalLM
 from llmfoundry.layers_registry import norms
-from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithFSDP
 from llmfoundry.models.layers import build_alibi_bias
 from llmfoundry.models.layers.attention import (
     check_alibi_support,
@@ -2560,7 +2562,7 @@ def test_hf_init(
         False,
     )
 
-    model = HuggingFaceModelWithFSDP(model, tokenizer)
+    model = HuggingFaceModel(model, tokenizer)
 
     batch = gen_random_batch(batch_size, test_cfg)
 
@@ -2609,7 +2611,7 @@ def test_head_dim_8_flash_mqa_attn(batch_size: int = 2):
 
     mpt = MPTForCausalLM(hf_config)
 
-    model = HuggingFaceModelWithFSDP(mpt, tokenizer, shift_labels=True)
+    model = HuggingFaceModel(mpt, tokenizer, shift_labels=True)
 
     model = model.to(test_cfg.device)
     batch = gen_random_batch(batch_size, test_cfg)
