Skip to content

Commit

Permalink
fix serialize
Browse files — Browse the repository at this point in the history
Signed-off-by: youkaichao <[email protected]>
  • Loading branch information
youkaichao committed Dec 11, 2024
1 parent e3f0a14 commit 51a1efb
Showing 1 changed file with 3 additions and 2 deletions.
5 changes: 3 additions & 2 deletions vllm/compilation/backends.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,7 @@ def compiled_graph(*args):

def mocked_compiled_fx_graph_hash(*args, **kwargs):
out = compiled_fx_graph_hash(*args, **kwargs)
# store the hash in the cache
nonlocal cache_data
cache_data[(runtime_shape, graph_index)] = out[0]
return out
Expand Down Expand Up @@ -598,8 +599,8 @@ def __call__(self, *args) -> Any:
# save the hash of the inductor graph for the next run
with open(self.compilation_config.inductor_hash_cache_path,
"w") as f:
print(dict(self.compilation_config.inductor_hash_cache),
file=f)
f.write(self.compilation_config.inductor_hash_cache.
serialize())
end_monitoring_torch_compile(self.vllm_config)

if not entry.use_cudagraph:
Expand Down

0 comments on commit 51a1efb

Please sign in to comment.