Commit 2acb742

use mixed underscore and dot (#70)

Signed-off-by: youkaichao <[email protected]>
youkaichao authored Nov 24, 2024
1 parent cbfbc00 commit 2acb742

Showing 405 changed files with 7,153 additions and 4,002 deletions.
5 changes: 3 additions & 2 deletions depyf/explain/patched_lazy_format_graph_code.py
@@ -3,7 +3,8 @@ def patched_lazy_format_graph_code(name, gm, maybe_id=None, **kwargs):
     from depyf.utils import get_code_owner
     func_name = get_current_compiled_fn_name()
     file_name = name if name != func_name else "Captured Graph"
-    file_name = func_name + " " + file_name
+    file_name = file_name.replace(" ", "_")
+    file_name = func_name + "." + file_name
     import inspect
     import os

@@ -35,7 +36,7 @@ def patched_lazy_format_graph_code(name, gm, maybe_id=None, **kwargs):
     src = simple_code + commented_src
     if filepath is not None:
         new_filepath = write_code_to_file_template(
-            src, os.path.dirname(filepath) + "/" + file_name + " " + "%s" + ".py")
+            src, os.path.dirname(filepath) + "/" + file_name + "." + "%s" + ".py")
     scope = fn.__globals__
     exec(compile(src, filename=new_filepath, mode="exec"), scope)
     fn.__code__ = scope[fn.__name__].__code__
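Editor's note: taken together, the two hunks above switch the graph-dump file names from space-separated ("__compiled_fn_1 Captured Graph 0.py") to dot/underscore-separated ("__compiled_fn_1.Captured_Graph.0.py"), avoiding spaces in file names. A minimal sketch of the new naming logic; the example values for func_name and name are hypothetical, not taken from a real run:

# Sketch of the renaming introduced above; example values are hypothetical.
func_name = "__compiled_fn_1"            # as returned by get_current_compiled_fn_name()
name = "Captured Graph"                  # graph name passed in by the caller

file_name = name if name != func_name else "Captured Graph"
file_name = file_name.replace(" ", "_")  # spaces within a component -> underscores
file_name = func_name + "." + file_name  # components joined by dots

print(file_name)  # __compiled_fn_1.Captured_Graph
# the old scheme produced "__compiled_fn_1 Captured Graph" instead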
2 changes: 1 addition & 1 deletion depyf/explain/patched_load_by_key_path.py
@@ -16,6 +16,6 @@ def patched_load_by_key_path(

     func_name = get_current_compiled_fn_name()
     new_filepath = write_code_to_file_template(src, os.path.join(
-        dump_src_dir, func_name + " kernel " + "%s" + ".py"))
+        dump_src_dir, func_name + ".kernel_" + "%s" + ".py"))
     path = new_filepath
     return unpatched_load_by_key_path(key, path, linemap, attrs)
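Editor's note: the same convention applied to Inductor kernel dumps, so kernel files now come out as e.g. "__compiled_fn_1.kernel_0.py" rather than "__compiled_fn_1 kernel 0.py". A short sketch of how the template expands; the directory and counter value are hypothetical, and exactly how write_code_to_file_template fills the %s slot is an assumption based on the placeholder:

import os

# Hypothetical values, for illustration only.
dump_src_dir = "/tmp/depyf_dump"
func_name = "__compiled_fn_1"

template = os.path.join(dump_src_dir, func_name + ".kernel_" + "%s" + ".py")
print(template)      # /tmp/depyf_dump/__compiled_fn_1.kernel_%s.py
print(template % 0)  # /tmp/depyf_dump/__compiled_fn_1.kernel_0.py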

Large diffs in six generated files are not rendered by default.

@@ -2,8 +2,8 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = '''<built-in method check of torch._C._dynamo.guards.GlobalStateGuard object at 0x1201acb30>'''
-___check_torch_function_mode_stack = '''<function make_torch_function_mode_stack_guard.<locals>.check_torch_function_mode_stack at 0x125274f70>'''
+___check_global_state = '''<built-in method check of torch._C._dynamo.guards.GlobalStateGuard object at 0x118194d70>'''
+___check_torch_function_mode_stack = '''<function make_torch_function_mode_stack_guard.<locals>.check_torch_function_mode_stack at 0x11ed952d0>'''
 Abs = '''<built-in function abs>'''
 Eq = '''<built-in function eq>'''
 Ne = '''<built-in function ne>'''
@@ -18,41 +18,51 @@
 FloorDiv = '''<built-in function floordiv>'''
 TrueDiv = '''<built-in function truediv>'''
 PowByNatural = '''<built-in function pow>'''
-IsNonOverlappingAndDenseIndicator = '''<function eval_is_non_overlapping_and_dense at 0x1152b4160>'''
+IsNonOverlappingAndDenseIndicator = '''<function eval_is_non_overlapping_and_dense at 0x10cd5d240>'''
 floor = '''<built-in function floor>'''
 ceiling = '''<built-in function ceil>'''
 FloorToInt = '''<built-in function floor>'''
 FloatPow = '''<built-in function pow>'''
 CeilToInt = '''<built-in function ceil>'''
-cast_symbool_to_symint_guardless = '''<function cast_symbool_to_symint_guardless at 0x1152b4310>'''
+cast_symbool_to_symint_guardless = '''<function cast_symbool_to_symint_guardless at 0x10cd5d3f0>'''
 RoundToInt = '''<built-in function round>'''
 RoundDecimal = '''<built-in function round>'''
 TruncToInt = '''<built-in function trunc>'''
 IntTrueDiv = '''<built-in function truediv>'''
 FloatTrueDiv = '''<built-in function truediv>'''
 ToFloat = '''<class 'float'>'''
+OpaqueUnaryFn_cos = '''<built-in function cos>'''
+OpaqueUnaryFn_cosh = '''<built-in function cosh>'''
+OpaqueUnaryFn_acos = '''<built-in function acos>'''
+OpaqueUnaryFn_sin = '''<built-in function sin>'''
+OpaqueUnaryFn_sinh = '''<built-in function sinh>'''
+OpaqueUnaryFn_asin = '''<built-in function asin>'''
+OpaqueUnaryFn_tan = '''<built-in function tan>'''
+OpaqueUnaryFn_tanh = '''<built-in function tanh>'''
+OpaqueUnaryFn_atan = '''<built-in function atan>'''
+OpaqueUnaryFn_sqrt = '''<built-in function sqrt>'''
 ___check_type_id = '''<built-in function check_type_id>'''
 ___check_obj_id = '''<built-in function check_obj_id>'''
 ___odict_getitem = '''<method '__getitem__' of 'dict' objects>'''
-___key_to_id = '''<function key_to_id at 0x115396a70>'''
+___key_to_id = '''<function key_to_id at 0x10ceafac0>'''
 ___dict_version = '''<built-in function dict_version>'''
-___dict_contains = '''<function _get_closure_vars.<locals>.<lambda> at 0x1251bd2d0>'''
+___dict_contains = '''<function _get_closure_vars.<locals>.<lambda> at 0x11ece1630>'''
 ___tuple_iterator_len = '''<method '__length_hint__' of 'tuple_iterator' objects>'''
-___tuple_iterator_getitem = '''<function tuple_iterator_getitem at 0x1153965f0>'''
-___get_torch_function_mode_stack_at = '''<function get_torch_function_mode_stack_at at 0x1153a5ab0>'''
+___tuple_iterator_getitem = '''<function tuple_iterator_getitem at 0x10ceaf640>'''
+___get_torch_function_mode_stack_at = '''<function get_torch_function_mode_stack_at at 0x10cecac20>'''
 __math_isnan = '''<built-in function isnan>'''
-__numpy_isnan = '''None'''
+__numpy_isnan = '''<ufunc 'isnan'>'''
 inf = '''inf'''
-__load_module = '''<function import_module at 0x102dfb910>'''
+__load_module = '''<function import_module at 0x1008eb910>'''
 utils_device = '''<module 'torch.utils._device' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/site-packages/torch/utils/_device.py'>'''
 device = '''<class 'torch.device'>'''
-___from_numpy = '''<function from_numpy at 0x11551e8c0>'''
-___as_tensor = '''<function _as_tensor_fullprec at 0x113216320>'''
+___from_numpy = '''<function from_numpy at 0x10d3983a0>'''
+___as_tensor = '''<function _as_tensor_fullprec at 0x10b39ca60>'''
 torch = '''<module 'torch' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/site-packages/torch/__init__.py'>'''
 inspect = '''<module 'inspect' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/inspect.py'>'''
 def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1])
@@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x):
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = '''<built-in method check of torch._C._dynamo.guards.GlobalStateGuard object at 0x120079bb0>'''
-___check_torch_function_mode_stack = '''<function make_torch_function_mode_stack_guard.<locals>.check_torch_function_mode_stack at 0x125235e10>'''
+___check_global_state = '''<built-in method check of torch._C._dynamo.guards.GlobalStateGuard object at 0x118061d50>'''
+___check_torch_function_mode_stack = '''<function make_torch_function_mode_stack_guard.<locals>.check_torch_function_mode_stack at 0x11ed39900>'''
 Abs = '''<built-in function abs>'''
 Eq = '''<built-in function eq>'''
 Ne = '''<built-in function ne>'''
@@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x):
 FloorDiv = '''<built-in function floordiv>'''
 TrueDiv = '''<built-in function truediv>'''
 PowByNatural = '''<built-in function pow>'''
-IsNonOverlappingAndDenseIndicator = '''<function eval_is_non_overlapping_and_dense at 0x1152b4160>'''
+IsNonOverlappingAndDenseIndicator = '''<function eval_is_non_overlapping_and_dense at 0x10cd5d240>'''
 floor = '''<built-in function floor>'''
 ceiling = '''<built-in function ceil>'''
 FloorToInt = '''<built-in function floor>'''
 FloatPow = '''<built-in function pow>'''
 CeilToInt = '''<built-in function ceil>'''
-cast_symbool_to_symint_guardless = '''<function cast_symbool_to_symint_guardless at 0x1152b4310>'''
+cast_symbool_to_symint_guardless = '''<function cast_symbool_to_symint_guardless at 0x10cd5d3f0>'''
 RoundToInt = '''<built-in function round>'''
 RoundDecimal = '''<built-in function round>'''
 TruncToInt = '''<built-in function trunc>'''
 IntTrueDiv = '''<built-in function truediv>'''
 FloatTrueDiv = '''<built-in function truediv>'''
 ToFloat = '''<class 'float'>'''
+OpaqueUnaryFn_cos = '''<built-in function cos>'''
+OpaqueUnaryFn_cosh = '''<built-in function cosh>'''
+OpaqueUnaryFn_acos = '''<built-in function acos>'''
+OpaqueUnaryFn_sin = '''<built-in function sin>'''
+OpaqueUnaryFn_sinh = '''<built-in function sinh>'''
+OpaqueUnaryFn_asin = '''<built-in function asin>'''
+OpaqueUnaryFn_tan = '''<built-in function tan>'''
+OpaqueUnaryFn_tanh = '''<built-in function tanh>'''
+OpaqueUnaryFn_atan = '''<built-in function atan>'''
+OpaqueUnaryFn_sqrt = '''<built-in function sqrt>'''
 ___check_type_id = '''<built-in function check_type_id>'''
 ___check_obj_id = '''<built-in function check_obj_id>'''
 ___odict_getitem = '''<method '__getitem__' of 'dict' objects>'''
-___key_to_id = '''<function key_to_id at 0x115396a70>'''
+___key_to_id = '''<function key_to_id at 0x10ceafac0>'''
 ___dict_version = '''<built-in function dict_version>'''
-___dict_contains = '''<function _get_closure_vars.<locals>.<lambda> at 0x1251bd2d0>'''
+___dict_contains = '''<function _get_closure_vars.<locals>.<lambda> at 0x11ece1630>'''
 ___tuple_iterator_len = '''<method '__length_hint__' of 'tuple_iterator' objects>'''
-___tuple_iterator_getitem = '''<function tuple_iterator_getitem at 0x1153965f0>'''
-___get_torch_function_mode_stack_at = '''<function get_torch_function_mode_stack_at at 0x1153a5ab0>'''
+___tuple_iterator_getitem = '''<function tuple_iterator_getitem at 0x10ceaf640>'''
+___get_torch_function_mode_stack_at = '''<function get_torch_function_mode_stack_at at 0x10cecac20>'''
 __math_isnan = '''<built-in function isnan>'''
-__numpy_isnan = '''None'''
+__numpy_isnan = '''<ufunc 'isnan'>'''
 inf = '''inf'''
-__load_module = '''<function import_module at 0x102dfb910>'''
+__load_module = '''<function import_module at 0x1008eb910>'''
 utils_device = '''<module 'torch.utils._device' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/site-packages/torch/utils/_device.py'>'''
 device = '''<class 'torch.device'>'''
-___from_numpy = '''<function from_numpy at 0x11551e8c0>'''
-___as_tensor = '''<function _as_tensor_fullprec at 0x113216320>'''
+___from_numpy = '''<function from_numpy at 0x10d3983a0>'''
+___as_tensor = '''<function _as_tensor_fullprec at 0x10b39ca60>'''
 torch = '''<module 'torch' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/site-packages/torch/__init__.py'>'''
 inspect = '''<module 'inspect' from '/Users/youkaichao/anaconda3/envs/py310/lib/python3.10/inspect.py'>'''
 def __guard_0_for_toy_function(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1])
@@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored):
     __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1])
     __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False
     __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b'])
-    __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359597216)
-    __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568)
+    __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323994544)
+    __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4405222576)
     return __guard_hit

 # Note: please refer to the graph code in __compiled_fn_1*.py.
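Editor's note: for readers new to these generated dumps, a guard function like __guard_0_for_toy_function re-checks, on every call, the assumptions the compiled graph was specialized on (ambient device state, tensor dtype/shape/stride, object identities); the cached graph is reused only if every check passes. Below is a toy re-implementation of that accumulate-and-AND pattern, using simplified stand-in checks rather than the real torch._dynamo guard helpers:

import torch

# Toy illustration of the guard pattern above: AND each condition into
# guard_hit and reuse the compiled artifact only if all conditions hold.
# These checks are simplified stand-ins, not torch._dynamo internals.
def toy_guard(L):
    guard_hit = True
    guard_hit = guard_hit and isinstance(L['a'], torch.Tensor)
    guard_hit = guard_hit and L['a'].dtype == torch.float32
    guard_hit = guard_hit and L['a'].shape == (10,)
    guard_hit = guard_hit and L['a'].stride() == (1,)
    guard_hit = guard_hit and L['a'].requires_grad
    return guard_hit

print(toy_guard({'a': torch.randn(10, requires_grad=True)}))  # True
print(toy_guard({'a': torch.zeros(5)}))                       # False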