diff --git a/evalscope/evaluator/evaluator.py b/evalscope/evaluator/evaluator.py
index 1b78b2f..f894411 100644
--- a/evalscope/evaluator/evaluator.py
+++ b/evalscope/evaluator/evaluator.py
@@ -86,6 +86,7 @@ def __init__(self,
                          **kwargs)
 
         # Get prompts from dataset
+        # TODO: support sampler
         self.prompts = self.data_adapter.gen_prompts(data_dict=self.dataset)
         del self.dataset
 
diff --git a/evalscope/perf/main.py b/evalscope/perf/main.py
index 969bae8..36b33e5 100644
--- a/evalscope/perf/main.py
+++ b/evalscope/perf/main.py
@@ -19,7 +19,9 @@ def run_perf_benchmark(args):
         args = Arguments(**args)
     elif isinstance(args, Namespace):
         args = Arguments.from_args(args)
-    seed_everything(args.seed)
+
+    if args.seed is not None:
+        seed_everything(args.seed)
 
     # Setup logger and output
     args.outputs_dir = get_output_path(args)
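
Note on the `run_perf_benchmark` change: with the guard, `args.seed is None` now means "skip deterministic seeding" instead of forwarding `None` into `seed_everything`. A minimal sketch of the pattern, assuming a typical seed helper that pins the Python, NumPy, and torch RNGs (this is an illustration, not evalscope's actual `seed_everything` body; `maybe_seed` is a hypothetical name):

```python
import random
from typing import Optional

import numpy as np
import torch


def seed_everything(seed: int) -> None:
    # Assumed behavior: pin every RNG source the benchmark touches
    # so repeated runs produce identical sampling.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


def maybe_seed(seed: Optional[int]) -> None:
    # Hypothetical wrapper mirroring the diff's call-site guard:
    # None leaves RNG state untouched rather than being forwarded
    # (torch.manual_seed rejects None with a TypeError).
    if seed is not None:
        seed_everything(seed)
```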