diff --git a/depyf/explain/patched_lazy_format_graph_code.py b/depyf/explain/patched_lazy_format_graph_code.py
index 8cfa35da..a2f3b3c3 100644
--- a/depyf/explain/patched_lazy_format_graph_code.py
+++ b/depyf/explain/patched_lazy_format_graph_code.py
@@ -3,7 +3,8 @@ def patched_lazy_format_graph_code(name, gm, maybe_id=None, **kwargs):
     from depyf.utils import get_code_owner
     func_name = get_current_compiled_fn_name()
     file_name = name if name != func_name else "Captured Graph"
-    file_name = func_name + " " + file_name
+    file_name = file_name.replace(" ", "_")
+    file_name = func_name + "." + file_name

     import inspect
     import os
@@ -35,7 +36,7 @@ def patched_lazy_format_graph_code(name, gm, maybe_id=None, **kwargs):
     src = simple_code + commented_src
     if filepath is not None:
         new_filepath = write_code_to_file_template(
-            src, os.path.dirname(filepath) + "/" + file_name + " " + "%s" + ".py")
+            src, os.path.dirname(filepath) + "/" + file_name + "." + "%s" + ".py")
     scope = fn.__globals__
     exec(compile(src, filename=new_filepath, mode="exec"), scope)
     fn.__code__ = scope[fn.__name__].__code__
diff --git a/depyf/explain/patched_load_by_key_path.py b/depyf/explain/patched_load_by_key_path.py
index adab9afc..14fdaaf4 100644
--- a/depyf/explain/patched_load_by_key_path.py
+++ b/depyf/explain/patched_load_by_key_path.py
@@ -16,6 +16,6 @@ def patched_load_by_key_path(
     func_name = get_current_compiled_fn_name()
     new_filepath = write_code_to_file_template(src, os.path.join(
-        dump_src_dir, func_name + " kernel " + "%s" + ".py"))
+        dump_src_dir, func_name + ".kernel_" + "%s" + ".py"))

     path = new_filepath
     return unpatched_load_by_key_path(key, path, linemap, attrs)
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py
similarity index 100%
rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py
rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py
similarity
index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index e7cd6503..845a00f9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d37cf70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1338952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d4c9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b42f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d37cf70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1338952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d4c9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b42f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d33fbe0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b41870>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4374424736) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4708278544) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323191488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d33de10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133839900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d1c52d0>''' +___dict_contains = '''. 
at 0x132ee9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4374424736) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4708278544) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323191488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 985eebc3..9ea82861 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138274f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1498952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1383c9e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a4af80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138274f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1498952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1383c9e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a4af80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138237d00>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a49870>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358089488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408384928) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359334592) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4402075648) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138235e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149835900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1381bd2d0>''' +___dict_contains = '''. 
at 0x1487e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358089488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408384928) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359334592) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4402075648) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 1c05e2a7..65688880 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131374f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1314c9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc42f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131374f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1314c9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc42f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131337d00>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc41870>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180704) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420968720) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332678144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4397882384) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131335e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da39900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1312bd2d0>''' +___dict_contains = '''. 
at 0x13d9e5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180704) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420968720) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332678144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4397882384) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
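The newly added guards in these fixtures (`2 <= L['a'].size()[0]`, `2 <= L['b'].size()[0]`, and `L['x'].size()[0] == L['b'].size()[0]`) come from 0/1 specialization and broadcast size inference on the dynamic dimension, and the guard comments themselves point to torch._dynamo.mark_unbacked(tensor, dim) as the way to opt out of the 0/1 specialization. The sketch below is illustrative only and is not part of this diff: the toy_function body is reconstructed from the source lines quoted in the guard comments, the backend/dynamic flags and the dump directory are assumptions, and depending on the PyTorch build the data-dependent size equality in `x * b` may additionally need a torch._check hint when the sizes are unbacked.

# Illustrative sketch, not part of the diff: avoiding the 0/1-specialization
# guards shown above with torch._dynamo.mark_unbacked, and dumping the depyf
# debug output that fixtures like these are generated from.
import torch
import depyf

def toy_function(a, b):
    # Reconstructed from test_pytorch.py lines 4-7 quoted in the guard comments.
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

compiled = torch.compile(toy_function, backend="aot_eager", dynamic=True)

a = torch.randn(10, requires_grad=True)
b = torch.randn(10, requires_grad=True)

# Marking dim 0 as unbacked asks Dynamo not to install size guards such as
# `2 <= L['a'].size()[0]` (0/1 specialization); see the guard comments above.
torch._dynamo.mark_unbacked(a, 0)
torch._dynamo.mark_unbacked(b, 0)

with depyf.prepare_debug("./depyf_output"):  # dump directory is an assumption
    compiled(a, b)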
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py 
similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to 
tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 119085af..ac8d9fb8 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b65f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbaef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132720c10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2950>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b65f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbaef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132720c10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2950>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b7b50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed1120>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4336593600) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4409433424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358187712) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbadd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1324648b0>''' +___dict_contains = '''. 
at 0x122b36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4336593600) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4409433424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358187712) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 9201db6c..077690ef 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dba5f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1baef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117020670>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3d2440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dba5f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1baef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117020670>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3d2440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dbbb50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1f1510>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329270432) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396851472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390972096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394457088) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116db9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1badd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d64ca0>''' +___dict_contains = '''. 
at 0x10f136950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329270432) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396851472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390972096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394457088) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index a9ad0793..22909954 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118aba5f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbaef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118d21870>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1333d6440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118aba5f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbaef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118d21870>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1333d6440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118abbb50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131df1120>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353043216) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439842128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563556992) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118ab9510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbadd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118a64ca0>''' +___dict_contains = '''. 
at 0x131d36950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353043216) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439842128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563556992) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from 
tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 0088dbcb..a58c5141 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125274f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ed952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1251bd2d0>''' +___dict_contains = '''. at 0x11ece1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125235e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ed39900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1251bd2d0>''' +___dict_contains = '''. at 0x11ece1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359597216) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323994544) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4405222576) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 906a7d7a..9404d3d1 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e77cf70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d5952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e6c52d0>''' +___dict_contains = '''. at 0x12d4e9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e739e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d535900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e6c52d0>''' +___dict_contains = '''. at 0x12d4e9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363938976) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4430405904) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364987072) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4430387200) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
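Earlier in this diff, the dynamic-shape fixtures also gained guards of the form L['x'].size()[0] == L['b'].size()[0], attributed to infer_size in _subclasses/fake_impls.py: when two tensors with symbolic sizes are multiplied, broadcasting can only be resolved without branching by asserting the sizes equal, size 1 having already been ruled out by the 0/1 specialization guard. A rough sketch of that per-dimension size-inference rule, assuming equal ranks (illustrative only, not the actual PyTorch implementation):

def infer_broadcast_dim(x_size, b_size):
    # Broadcasting for one dimension: sizes must match, or one of them must be 1.
    if x_size == b_size:
        return x_size
    if x_size == 1:
        return b_size
    if b_size == 1:
        return x_size
    raise RuntimeError(f"sizes {x_size} and {b_size} are not broadcastable")

# With "2 <= size" already guarded, the size-1 branches are unreachable, so the
# only consistent outcome is equality -- exactly the guard recorded for
# "return x * b" in the fixtures above.
assert infer_broadcast_dim(10, 10) == 10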
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 7d5c494b..53e5d2ed 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x165368f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14e9952d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15a8bd2d0>''' +___dict_contains = '''. at 0x1432f5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x165325e10>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14e939900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15a8bd2d0>''' +___dict_contains = '''. at 0x1432f5630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4414171840) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4878147056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395985600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4717697024) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
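Most of the remaining churn in these fixtures is in the ___check_obj_id(G['torch'], ...) guards: the expected values are CPython object ids of the torch module and torch.abs in the process that generated the files, so they differ on every regeneration and across machines. A minimal sketch of what such an identity guard amounts to (illustrative only; not the depyf or Dynamo implementation):

import torch

# Recorded when the guard is built: the ids of the globals the compiled code relies on.
expected_torch_id = id(torch)
expected_abs_id = id(torch.abs)

def check_obj_id(obj, expected):
    # Passes only if the exact same object is still bound; monkey-patching
    # torch.abs, for example, would change the id and force a recompile.
    return id(obj) == expected

assert check_obj_id(torch, expected_torch_id)
assert check_obj_id(torch.abs, expected_abs_id)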
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py 
b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 7ca19c60..6ae8ad17 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244b65f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1232baef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1244648b0>''' +___dict_contains = '''. 
at 0x12323a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244b5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1232badd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1244648b0>''' +___dict_contains = '''. 
at 0x12323a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392955040) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4583498000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363840192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4585576448) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index f18bee8e..db4b5300 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c2b7490>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128cbaef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c264ca0>''' +___dict_contains = '''. at 0x128c3a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c2b5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128cbadd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c264ca0>''' +___dict_contains = '''. at 0x128c3a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393970448) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4720451216) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4351027904) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4354512896) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 9d242092..a0d6ddce 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c6b65f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12a4baef0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c664ca0>''' +___dict_contains = '''. at 0x12a436950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c6b5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12a4badd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c664ca0>''' +___dict_contains = '''. at 0x12a436950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405144496) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4475493872) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332350384) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4425145280) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py 
similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 3f8c75d8..139847d6 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12984dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a768550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. 
at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a913f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b15f7f0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. 
at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12984dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a768550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a913f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b15f7f0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a53be0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a7379a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361071376) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4407336432) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384337056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4454505648) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12976c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a736a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1297cba30>''' +___dict_contains = '''. 
at 0x11a6169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361071376) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4407336432) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384337056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4454505648) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 61a075dd..b52ad084 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12174dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118868550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12228d3f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118a5bd90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12174dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118868550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12228d3f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118a5bd90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122257c70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1188339a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360875168) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4577206464) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352404160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4419901440) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12166c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118832a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1216cba30>''' +___dict_contains = '''. 
at 0x10ff169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360875168) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4577206464) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352404160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4419901440) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index ff93e73f..7b41c9f9 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12124dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f368550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12148ed40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f55aa70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12124dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f368550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12148ed40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f55aa70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121453c70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3339a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4327599264) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4410482960) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331858864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392409888) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12106c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f332a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1210cba30>''' +___dict_contains = '''. 
at 0x10f1169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4327599264) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4410482960) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331858864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392409888) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
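
The fixture hunks above boil down to three behavioral changes in the dumped guard functions: they now list the OpaqueUnaryFn_* helpers, they reference _dynamo/output_graph.py:483 rather than :479, and they carry extra size guards such as L['x'].size()[0] == L['b'].size()[0] and 2 <= L['b'].size()[0] coming from 0/1 specialization of dynamic shapes. Below is a minimal sketch of how dumps and guards of this kind are produced, assuming a recent PyTorch with automatic dynamic shapes and depyf's prepare_debug API; toy_function is reconstructed from the guard comments (test_pytorch.py lines 4-7) rather than copied from the repository, and the dump directory name is illustrative only.

import torch
import depyf

def toy_function(a, b):
    # Reconstructed from the guard comments above; the data-dependent
    # branch on b.sum() is what splits the function into resume functions
    # (__resume_at_30_*, __resume_at_38_*) seen in these fixtures.
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

compiled = torch.compile(toy_function, backend="eager")

# Dump everything Dynamo produces (guards, transformed bytecode, captured
# graphs) into a directory, similar to the trees under tests/depyf_output/.
with depyf.prepare_debug("./dump_dir"):
    # First call: sizes are static, so guards specialize on size=[10].
    compiled(torch.randn(10), torch.randn(10))
    # A call with a different length triggers a recompile with symbolic
    # sizes (size=[None]); 0/1 specialization then emits guards of the
    # form `2 <= L['b'].size()[0]`, as captured in the updated fixtures.
    compiled(torch.randn(8), torch.randn(8))

# Per the guard message, marking a dimension unbacked before compilation
# avoids that 0/1 specialization guard for the marked tensor and dim:
t = torch.randn(8)
torch._dynamo.mark_unbacked(t, 0)
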
diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py 
b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 07a67260..4334a22a 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc64790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. 
at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4e0e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc33490>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. 
at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc64790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4e0e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc33490>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110d536d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fe5bf40>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387024032) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4445085968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331170496) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375861248) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110a681f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc32a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110ac3a30>''' +___dict_contains = '''. 
at 0x11fb169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387024032) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4445085968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331170496) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375861248) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 22d335f1..a774a33e 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12214d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c868790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12238e950>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c833490>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12214d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c868790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12238e950>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c833490>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223579a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ca5bf40>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389956768) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4591886608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352551616) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356036448) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12206c1f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c832a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1220cba30>''' +___dict_contains = '''. 
at 0x10bf169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389956768) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4591886608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352551616) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356036448) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index dd6ecc97..bbc840ad 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096f5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d64790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1099369e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d33130>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096f5510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d64790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1099369e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d33130>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1098ff910>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d339a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332317856) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4336640272) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4839332720) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096141f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d32a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10966fa30>''' +___dict_contains = '''. 
at 0x132c169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332317856) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4336640272) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4839332720) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 124683c0..64ff14d2 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c4dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13296c550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122acba30>''' +___dict_contains = '''. at 0x1328169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122a6c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132932a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122acba30>''' +___dict_contains = '''. at 0x1328169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549216) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4597129248) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357728960) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406204256) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index c26b8317..f35bb144 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b24dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128068550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b1c3a30>''' +___dict_contains = '''. at 0x10ff129e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b16c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128032a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b1c3a30>''' +___dict_contains = '''. at 0x10ff129e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389137168) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4475493792) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4316769200) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375633232) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 25d7ae8e..7e285c29 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ef4dab0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13386c550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10edcba30>''' +___dict_contains = '''. at 0x13371a9e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ed6c040>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133832a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10edcba30>''' +___dict_contains = '''. at 0x13371a9e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325551264) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395802896) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364643008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368128000) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
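The ___check_obj_id(G['torch'], ...) guards that change throughout these fixtures compare CPython object identity, so the large integers are simply id(torch) / id(torch.abs) on the machine that regenerated the snapshots and are expected to differ on every run. A minimal sketch of the idea (stand-in names, not the dump's own helpers):

import torch

def check_obj_id(obj, expected_id):
    # identity guard: the cached compiled code is only reused while the exact
    # same module / function objects are still in place
    return id(obj) == expected_id

expected = id(torch.abs)
print(check_obj_id(torch.abs, expected))   # True in this process
print(check_obj_id(abs, expected))         # False: a different object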
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index d032b848..46738d0e 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11924d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120f6c790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1191c3a30>''' +___dict_contains = '''. at 0x120e169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11916c1f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120f36a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1191c3a30>''' +___dict_contains = '''. at 0x120e169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4367707296) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434600128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332825280) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4348598112) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 285858ee..9faad93a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10fb4d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d46c790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10fac3a30>''' +___dict_contains = '''. at 0x12d3169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10fa6c1f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d436a70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10fac3a30>''' +___dict_contains = '''. at 0x12d3169e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4351290528) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4401045776) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404898816) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4709308976) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 343da990..3e835984 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10d94d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a36c790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10d8cba30>''' +___dict_contains = '''. at 0x11a21a9e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10d8681f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a33aa70>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10d8cba30>''' +___dict_contains = '''. at 0x11a21a9e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384992416) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4389314832) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364380864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4438775808) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
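For reading the guard dumps above: each __guard_*_for_* function receives L (the frame's local tensors) and G (its globals) and must return True for the cached compiled code to be reused. A rough, self-contained sketch of the same shape of check (illustrative names only, not the real torch._dynamo.guards helpers):

import torch

def toy_guard(L, G):
    hit = True
    hit = hit and isinstance(L['a'], torch.Tensor) and tuple(L['a'].shape) == (10,)
    hit = hit and isinstance(L['b'], torch.Tensor) and tuple(L['b'].shape) == (10,)
    hit = hit and L['a'] is not L['b']       # stands in for check_no_aliasing
    hit = hit and G['torch'] is torch        # stands in for ___check_obj_id
    return hit

L = {'a': torch.randn(10), 'b': torch.randn(10)}
print(toy_guard(L, {'torch': torch}))                                            # True -> reuse
print(toy_guard({'a': torch.randn(5), 'b': torch.randn(5)}, {'torch': torch}))   # False -> recompile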
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to 
tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py diff --git 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to 
tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from 
tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index ab398d48..6da6a82d 100644 --- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559c3e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f8c3a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x155dcd5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12368a320>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559c3e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f8c3a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x155dcd5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12368a320>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559b3eb0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12369e7a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317375648) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4385317136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391529152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395013984) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559b3a30>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f640d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x15545a560>''' +___dict_contains = '''. 
at 0x122891a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317375648) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4385317136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391529152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395013984) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index d8ac57e6..74acf1da 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12be07e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5803a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2eedd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb63b0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12be07e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5803a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2eedd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb63b0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bfe4310>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb57e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4339723104) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406287856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331203264) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4334688256) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bdc7a30>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5640d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b566560>''' +___dict_contains = '''. 
at 0x14b791a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4339723104) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406287856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331203264) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4334688256) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 52e84fa4..0c5c9ee2 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157103e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375883a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576e6710>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137f14e50>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157103e20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375883a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576e6710>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137f14e50>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576396c0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137e268c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404735136) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4587692304) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323371712) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326856704) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570c3a30>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375040d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a666560>''' +___dict_contains = '''. 
at 0x136f91a20>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404735136) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4587692304) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323371712) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326856704) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
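A note on the two guards added in the hunks above: `L['x'].size()[0] == L['b'].size()[0]` is installed because `return x * b` broadcasts the two dynamic dimensions (the `infer_size` reference in the guard comment), while the `2 <= L['a'].size()[0]` / `2 <= L['b'].size()[0]` guards come from Dynamo's 0/1 specialization of dynamic sizes; the generated comment itself points at `torch._dynamo.mark_unbacked(tensor, dim)` as the opt-out. Below is a minimal, self-contained sketch of that hint on a purely elementwise function. The function, variable names, and the `backend="eager"` choice are assumptions for illustration, not part of this test suite, and a function that broadcasts two marked tensors (like the `x * b` in these fixtures) would typically also need an explicit `torch._check` relating the two sizes.

import torch

def scale(t):
    # Purely elementwise, so the traced graph never has to decide whether
    # t.size(0) is 0 or 1.
    return t * 2 + 1

t = torch.randn(10)

# Mark dim 0 as unbacked so Dynamo does not 0/1-specialize it; without this,
# once the dimension becomes dynamic Dynamo installs a `2 <= t.size(0)`-style
# guard like the ones shown in the diff above.
torch._dynamo.mark_unbacked(t, 0)

compiled = torch.compile(scale, backend="eager")
out = compiled(t)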
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured 
Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py similarity 
index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts 
__compiled_fn_7 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 804c11ca..31ef0b2f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c828af0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127b0add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. 
at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cf5c160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1501fb5b0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. 
at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c828af0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127b0add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cf5c160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1501fb5b0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cbfc790>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150039a20>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364692240) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417822192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4376423664) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428291088) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c7eb520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1279eb910>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bcecdc0>''' +___dict_contains = '''. 
at 0x1274d9630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364692240) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417822192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4376423664) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428291088) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 10722cb9..78ba9a21 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d43caf0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12830add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12da50160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a95cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d43caf0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12830add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12da50160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a95cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d7f9870>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1299393f0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393757856) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398080272) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4421512192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4456602160) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d3ef520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1282eb910>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c7f4dc0>''' +___dict_contains = '''. 
at 0x11fdd1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393757856) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398080272) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4421512192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4456602160) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index ea76f58a..d62c0611 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12423caf0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13890add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245fba30>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13914fac0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12423caf0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13890add0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245fba30>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13914fac0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid 
specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245f8790>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138908160>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326845600) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4409993920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4721891328) + __guard_hit = __guard_hit and 2 <= 
L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1241eb520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1388eb910>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_toy_function(a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1237f8dc0>''' +___dict_contains = '''. 
at 0x12fad1630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326845600) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4409993920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4721891328) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
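
The new guard lines in the dynamic-shape fixtures above (for example "2 <= L['b'].size()[0]") come from Dynamo's 0/1 specialization of symbolic sizes, and the guard comments themselves point at torch._dynamo.mark_unbacked(tensor, dim) as the opt-out. A minimal sketch of that suggestion follows; it assumes a recent PyTorch that exposes torch._dynamo.mark_unbacked, and the function, shapes, and backend are illustrative rather than part of this patch:

    import torch

    def scale(t):
        # Simple elementwise op, no data-dependent control flow.
        return t * 2.0

    compiled = torch.compile(scale, backend="eager")

    t = torch.randn(8)
    # Mark dim 0 as unbacked before the first compiled call so Dynamo does
    # not install "2 <= ...size()[0]" style guards (0/1 specialization)
    # for that dimension.
    torch._dynamo.mark_unbacked(t, 0)
    out = compiled(t)
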
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to 
tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py 
rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index b0a52a22..306a9c75 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c382200>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13a0c97e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11b966ef0>''' +___dict_contains = '''. 
at 0x11d252560>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118cfcb80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13a065000>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11b966ef0>''' +___dict_contains = '''. 
at 0x11d252560>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328647840) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388462864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397214400) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4400699392) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 084697a5..e4cb4a15 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f24ae60>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226cd5a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e17ae60>''' +___dict_contains = '''. at 0x116d19b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1192fcb80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122665000>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e17ae60>''' +___dict_contains = '''. at 0x116d19b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332842144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393705744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384386048) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387888304) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 7e554919..c5c1f043 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150b76ef0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f4c9360>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x150366e60>''' +___dict_contains = '''. at 0x11ff924d0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124decb80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f465000>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x150366e60>''' +___dict_contains = '''. at 0x11ff924d0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350930080) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4422017296) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325911472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329413808) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
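
Each of these guard functions also calls check_tensor(...) with the dtype, requires_grad, size, and stride recorded at compile time; size=[None] in the dynamic-shape fixtures stands for a dimension left symbolic. The real check_tensor guard is implemented in PyTorch's C++ guard machinery, so the following is only a conceptual Python re-implementation, included to illustrate what the recorded arguments mean:

    import torch

    def check_tensor_like(t, dtype, requires_grad, size, stride):
        # None entries stand for dynamic (symbolic) dimensions.
        return (
            isinstance(t, torch.Tensor)
            and t.dtype == dtype
            and t.requires_grad == requires_grad
            and all(e is None or e == s for e, s in zip(size, t.size()))
            and all(e is None or e == s for e, s in zip(stride, t.stride()))
        )

    a = torch.randn(10, requires_grad=True)
    assert check_tensor_like(a, torch.float32, True, size=[10], stride=[1])
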
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py new file mode 100644 index 00000000..21d4d18b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: 
"f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py new file mode 100644 index 00000000..c7d3fcde --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -0,0 
+1,9 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 2ace3f25..0d050b57 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ba40af0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129275900>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13aeecdc0>''' +___dict_contains = '''. 
at 0x11e2cd6c0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13b8ef520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129194430>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13aeecdc0>''' +___dict_contains = '''. 
at 0x11e2cd6c0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878624) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4370637072) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4396493504) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4464990208) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 05bef819..3001117e 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124524af0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12957cca0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x123becdc0>''' +___dict_contains = '''. at 0x128bcd510>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244eb520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1294943a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x123becdc0>''' +___dict_contains = '''. at 0x128bcd510>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350307488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4426211600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330106016) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4333608112) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 17dbf688..e56786c1 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d024af0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157d78ca0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c700dc0>''' +___dict_contains = '''. at 0x1572d9510>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cfeb520>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157ca05e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12c700dc0>''' +___dict_contains = '''. at 0x1572d9510>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364889248) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4707229728) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317178560) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4367472640) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
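
A recurring change in these full_code_for_toy_function_*.py hunks is that the ___check_obj_id(...) arguments and the hexadecimal function addresses differ between the old and new fixtures; those are runtime object identities, not behavioral changes. A conceptual sketch of what that guard compares (not the actual PyTorch implementation), to show why the integers churn whenever the fixtures are regenerated:

    import torch

    def check_obj_id_like(obj, expected_id):
        # The guard checks CPython object identity, so the expected value
        # is a process-specific id() and changes from run to run.
        return id(obj) == expected_id

    expected = id(torch)
    assert check_obj_id_like(torch, expected)
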
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py rename to 
tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from 
tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py 
similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index bc221cfa..2896bbee 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168692830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1689f8ca0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ea1a440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168692830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1689f8ca0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ea1a440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1686927a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e96f130>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180384) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404190864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326304448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329789280) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16865f6d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6631c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1685eeb90>''' +___dict_contains = '''. 
at 0x11e5d7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180384) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404190864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326304448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329789280) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index 66ef0877..c1c5fda2 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f3f8ca0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15731e440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f3f8ca0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15731e440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96d40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157273130>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405177024) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4706180752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325485488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393688080) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db636d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15705f1c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13daf2b90>''' +___dict_contains = '''. 
at 0x156fd3130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405177024) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4706180752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325485488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393688080) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index ee003628..cd2b25aa 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130ea2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1260f8160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13121e440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130ea2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1260f8160>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13121e440>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96d40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131173130>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318194848) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368539920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322306752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e636d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130e631c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x125df2b90>''' +___dict_contains = '''. 
at 0x130dd7130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318194848) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368539920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322306752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from 
tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git 
a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 911fb113..76a72386 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116128f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101f400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. 
at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163335b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11127c040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. 
at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116128f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101f400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163335b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11127c040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163309d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1111c3760>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317784848) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373782160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386548656) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4390051152) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1160eed40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101c700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11606d1b0>''' +___dict_contains = '''. 
at 0x110f537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317784848) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373782160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386548656) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4390051152) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 6813ba93..c1c11733 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f728f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11161b400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9375b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111878040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f728f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11161b400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9375b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111878040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9349d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1117c3760>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323535632) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358053520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391561920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395046912) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f6eed40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111618700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f66d1b0>''' +___dict_contains = '''. 
at 0x1114537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323535632) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358053520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391561920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395046912) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 31c80f47..93736ec8 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122328f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12161b400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226335b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121874040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122328f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12161b400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226335b0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121874040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226309d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121618ca0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4385008800) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325763776) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4384249696) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1222eed40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121618700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12226d1b0>''' +___dict_contains = '''. 
at 0x1214537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4385008800) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325763776) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4384249696) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to 
tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py index ff0979d9..e4e9efa6 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128096830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f2a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f7f2b90>''' +___dict_contains = '''. at 0x11f1db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1280636d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f2631c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f7f2b90>''' +___dict_contains = '''. at 0x11f1db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350356640) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364233408) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4369569792) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 78ff5a9e..84103380 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11dd9e830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1507a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11dcfab90>''' +___dict_contains = '''. at 0x1506db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11dd6b6d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1507671c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11dcfab90>''' +___dict_contains = '''. at 0x1506db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329401504) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406288576) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397870080) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4585577488) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 428c3c25..5e329d01 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1198d2830>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a7a2b90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11982eb90>''' +___dict_contains = '''. at 0x14a6db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11989f6d0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a7631c0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11982eb90>''' +___dict_contains = '''. at 0x14a6db130>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317146272) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321468688) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428289888) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
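Most of the remaining churn in these fixtures is the integer literals passed to `___check_obj_id(G['torch'], ...)` and the function reprs in the guard globals; both encode CPython object addresses, so they differ on every run and machine. A rough illustration of why those numbers are unstable — my sketch of the guard's observable behavior, not the real Dynamo helper:

import torch

def check_obj_id(obj, expected_id):
    # an id-based guard boils down to CPython object identity
    return id(obj) == expected_id

recorded = id(torch.abs)                  # captured when the guard was generated
assert check_obj_id(torch.abs, recorded)  # stable within one process...
print(recorded)                           # ...but a different literal in every new run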
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py 
b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index 6bdbcece..d7715145 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a328f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117a1b400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a26d1b0>''' +___dict_contains = '''. 
at 0x1179537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a2ead40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117a18700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a26d1b0>''' +___dict_contains = '''. 
at 0x1179537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321143968) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408385808) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332367008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335868944) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index 3be67a04..bdf1cf2a 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123d28f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b21f400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x123c6d1b0>''' +___dict_contains = '''. at 0x11b1537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123cead40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b21c700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x123c6d1b0>''' +___dict_contains = '''. at 0x11b1537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386073760) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4411531536) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354763456) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4432484192) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index c1b34b9f..ef789c96 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118528f70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e717400>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11846d1b0>''' +___dict_contains = '''. at 0x11e6537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1184ead40>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e714700>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11846d1b0>''' +___dict_contains = '''. at 0x11e6537f0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324682512) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394753680) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389432000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4468135936) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
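Note (editor, not part of the patch): the dynamic-shape fixtures that follow (debug_module_eager_with_dynamic_shape_with_grad) additionally pick up 0/1-specialization guards such as `2 <= L['b'].size()[0]`, and the generated guard comments themselves point to `torch._dynamo.mark_unbacked(tensor, dim)` as the way to avoid that specialization. A minimal sketch of that suggestion, assuming a recent PyTorch where torch._dynamo.mark_unbacked is available; the forward function and shapes below are illustrative stand-ins, not the fixture's actual test code:

    import torch

    def forward(a, b):
        # Illustrative stand-in for the test's forward; mirrors the shape of the
        # guarded user code (a / (abs(a) + 1), then x * b), not the fixture itself.
        x = a / (torch.abs(a) + 1)
        return x * b

    a = torch.randn(10, requires_grad=True)
    b = torch.randn(10, requires_grad=True)

    # Mark dim 0 as unbacked so Dynamo does not install the "2 <= size(0)"
    # guard that 0/1 specialization would otherwise add for dynamic shapes.
    torch._dynamo.mark_unbacked(a, 0)
    torch._dynamo.mark_unbacked(b, 0)

    compiled = torch.compile(forward, backend="eager", dynamic=True)
    out = compiled(a, b)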
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from 
tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index 68a8923e..ec3a2bf3 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca8b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c95f30>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. 
at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9d5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. 
at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca8b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c95f30>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9d5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2f80>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9ec20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed0c10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321963168) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364249952) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4575091408) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c94790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11c99dbd0>''' +___dict_contains = '''. 
at 0x122c02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321963168) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364249952) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4575091408) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index 210eb0bb..d3e39f25 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12018b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120195cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039d5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d2dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12018b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120195cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039d5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d2dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039ec20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d0ee0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355796128) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423065872) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332448448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335933440) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1201897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120194550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120099bd0>''' +___dict_contains = '''. 
at 0x120106950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355796128) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423065872) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332448448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335933440) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 04edf9c3..4968f42e 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115683370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b295cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4ab0a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115683370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b295cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599d510>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4ab0a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599ec20>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4a8ee0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330286240) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388055744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391540576) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1156817e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b294550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115599bd0>''' +___dict_contains = '''. 
at 0x12b20a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330286240) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388055744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391540576) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
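[Editor's note] The regenerated fixtures above add two kinds of guards to the dynamic-shape recompiles: a symbolic size-equality guard (L['x'].size()[0] == L['b'].size()[0]) and 0/1-specialization guards (2 <= L['a'].size()[0], 2 <= L['b'].size()[0]) whose comments point at torch._dynamo.mark_unbacked(tensor, dim). The snippet below is a minimal, self-contained sketch of how such dumps can be produced; the module body mirrors the forward() referenced in the guard comments, but the class name, dump directory, and the explicit mark_dynamic calls are illustrative assumptions, not the repository's test_pytorch.py.

import torch
import depyf

class Mod(torch.nn.Module):
    def forward(self, a, b):
        x = a / (torch.abs(a) + 1)   # first use of a's dim 0 -> "2 <= L['a'].size()[0]" style guard
        if b.sum() < 0:              # data-dependent branch: graph break, resume functions get their own guards
            b = b * -1
        return x * b                 # produces the L['x'].size()[0] == L['b'].size()[0] guard

mod = torch.compile(Mod())

with depyf.prepare_debug("./depyf_debug_dir"):   # depyf dumps full_code_for_forward_*.py here
    mod(torch.randn(10), torch.randn(10))        # static shapes: __guard_0_for_forward, size=[10]

    a, b = torch.randn(8), torch.randn(8)
    # Marking dim 0 dynamic forces the size=[None] recompile whose guards appear above.
    # Per the generated comments, torch._dynamo.mark_unbacked(tensor, 0) would additionally
    # avoid the framework's 0/1 specialization (the "2 <= ...size()[0]" guards).
    torch._dynamo.mark_dynamic(a, 0)
    torch._dynamo.mark_dynamic(b, 0)
    mod(a, b)                                     # dynamic shapes: __guard_1_for_forward

Running this writes the guard/transformed-code files whose diffs are shown in this patch; only the file-naming scheme (dots and underscores instead of spaces) changes, not their contents.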
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py 
b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 2b58c232..561cac35 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f86dd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d395e10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. 
at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a2a37f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d1f5360>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. 
at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f86dd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d395e10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a2a37f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d1f5360>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a058820>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d69beb0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354895008) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439843008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358761632) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412562608) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f857e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d394790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x119e9dbd0>''' +___dict_contains = '''. 
at 0x12d306b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354895008) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439843008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358761632) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412562608) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 81be5374..25bebcd6 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12038add0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12059f7f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9bd90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12038add0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12059f7f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9bd90>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120454820>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9be20>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360973472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428308672) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393380544) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4415707136) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1203897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94280>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120299bd0>''' +___dict_contains = '''. 
at 0x112ac8280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360973472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428308672) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393380544) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4415707136) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 08ddc00e..e9b0e2fc 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11708add0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118695e10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11729feb0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11872b370>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11708add0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118695e10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11729feb0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11872b370>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117158820>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11889beb0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321356480) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396850832) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325026496) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4403124224) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1170897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118694790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116e99bd0>''' +___dict_contains = '''. 
at 0x118602b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321356480) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396850832) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325026496) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4403124224) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
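The new 2 <= L['a'].size()[0] and L['x'].size()[0] == L['b'].size()[0] guards above come from 0/1 specialization and size matching of symbolic shapes. Below is a minimal sketch, reconstructed from the user-code lines quoted in the guard comments (the class name, tensor lengths, and default torch.compile settings are illustrative assumptions), of a forward that produces guards and resume functions of this shape:

import torch

class ToyModule(torch.nn.Module):
    def forward(self, a, b):
        x = a / (torch.abs(a) + 1)   # first use of a's size; source of the 2 <= L['a'].size()[0] guard
        if b.sum() < 0:              # data-dependent branch: graph break, hence torch_dynamo_resume_in_forward_at_15
            b = b * -1
        return x * b                 # multiplying x by b induces the x.size(0) == b.size(0) guard

mod = torch.compile(ToyModule())
mod(torch.randn(10), torch.randn(10))  # first call: guards specialize on length 10
mod(torch.randn(8), torch.randn(8))    # a new length typically triggers a recompile with symbolic-size guards
# The guard text above suggests torch._dynamo.mark_unbacked(tensor, dim) to avoid
# the 0/1 specialization; availability depends on the PyTorch version in use.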
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py
rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
similarity index 100%
rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py
rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py
rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
similarity index 100%
rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py
rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py
index 0f6e7f59..3df1ba23 100644
--- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py
+++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py
@@ -2,8 +2,8 @@
 # Note: the following variables are used inside the guard function.
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10cb8b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117b95f30>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ca9dbd0>''' +___dict_contains = '''. at 0x117b02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10cb897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117b94790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ca9dbd0>''' +___dict_contains = '''. at 0x117b02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328762528) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387414288) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364970688) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423047008) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
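The ___check_obj_id(G['torch'], ...) constants explain most of the churn in these regenerated fixtures: the guard records the id() of a global at compile time and re-checks it on every call, and ids differ between interpreter runs. A small sketch of that check follows, as an approximation rather than the actual torch._C._dynamo.guards implementation:

import torch

def check_obj_id(obj, expected_id):
    # id() is the object's address in CPython, which is why the constants baked
    # into the dumps (e.g. 4364970688) are rewritten whenever the fixtures are
    # regenerated in a fresh process.
    return id(obj) == expected_id

torch_id_at_compile_time = id(torch)
assert check_obj_id(torch, torch_id_at_compile_time)          # guard hit
assert not check_obj_id(torch, torch_id_at_compile_time + 1)  # guard miss forces a recompile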
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 098cbab8..e2f169c9 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10bc8b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121395cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bb99bd0>''' +___dict_contains = '''. at 0x121306950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10bc897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121394550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bb99bd0>''' +___dict_contains = '''. at 0x121306950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358646944) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4362969360) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353452736) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4379006976) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
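The check_tensor(...) guards pin the tensor metadata the graph was traced with: dtype, dispatch keys, device, requires_grad, sizes, and strides, with size=[None] standing for a dimension compiled as dynamic. A rough Python stand-in for that check, simplified and not the real guard code:

import torch

def check_tensor(t, dtype, requires_grad, size, stride):
    if t.dtype != dtype or t.requires_grad != requires_grad:
        return False
    for dim, expected in enumerate(size):
        # None marks a dynamic dimension (size=[None] in the dumps): any length
        # passes, while a concrete entry such as size=[10] must match exactly.
        if expected is not None and t.size(dim) != expected:
            return False
    for dim, expected in enumerate(stride):
        if expected is not None and t.stride(dim) != expected:
            return False
    return True

b = torch.randn(10, requires_grad=True)
assert check_tensor(b, torch.float32, True, size=[10], stride=[1])
assert check_tensor(b, torch.float32, True, size=[None], stride=[1])
assert not check_tensor(b, torch.float64, True, size=[10], stride=[1])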
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 4c0974e7..5920d6ca 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10de8b370>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115d95cf0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10dd99bd0>''' +___dict_contains = '''. at 0x115d0a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10de897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115d94550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10dd99bd0>''' +___dict_contains = '''. at 0x115d0a950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328500384) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388462864) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318472896) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321957888) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index a15de2b6..82ee3d8d 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11868add0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124a98550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11859dbd0>''' +___dict_contains = '''. at 0x1249c4280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1186897e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124a98280>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11859dbd0>''' +___dict_contains = '''. at 0x1249c4280>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390382752) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4440891664) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332088000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335572992) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index e288a62c..044aa93a 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bd86dd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117f95e10>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bc99bd0>''' +___dict_contains = '''. at 0x117f02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bd857e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117f94790>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bc99bd0>''' +___dict_contains = '''. at 0x117f02b90>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364758176) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4774338832) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362693312) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394735456) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index 725171a1..59308f73 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10a586dd0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e895bd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10a499bd0>''' +___dict_contains = '''. at 0x11e806950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10a5857e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e894550>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10a499bd0>''' +___dict_contains = '''. at 0x11e806950>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4319145120) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323467536) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328336064) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4376811360) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py index b6aecbd9..bd7e9171 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ed55630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639ba0e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. 
at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dd00b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1640000d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. 
at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ed55630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639ba0e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dd00b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1640000d0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13daee200>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639b81f0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392938656) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358363136) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ecdd360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16390d990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12e7d0b80>''' +___dict_contains = '''. 
at 0x161f87880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392938656) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358363136) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py index 16867c2f..afcffc21 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f6d630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1303f8b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ef70040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f6d630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1303f8b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ef70040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1302eee60>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b4310>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4398361360) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4570914448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362792096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439825424) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f39360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e911990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1278ecb80>''' +___dict_contains = '''. 
at 0x11d76f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4398361360) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4570914448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362792096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439825424) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 5781b0d8..a00f2924 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152571630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15d508b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12dbc0040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152571630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15d508b80>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12dbc0040>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1528eee60>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb4310>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371787072) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424114208) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4366363328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434581344) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152549360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fe0d990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f6e8b80>''' +___dict_contains = '''. 
at 0x11db73880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371787072) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424114208) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4366363328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434581344) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py rename to 
tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename 
from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 03e7e449..b56f3b2c 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db26dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223725f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1ad750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db26dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223725f0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1ad750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12230d5a0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1439a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330629904) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4344422032) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353108672) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412561408) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4ce50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db25240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12163a680>''' +___dict_contains = '''. 
at 0x11d5beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330629904) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4344422032) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353108672) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412561408) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 1e2e8bea..3b11680e 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42edd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x113080430>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e965750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42edd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x113080430>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e965750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1130169e0>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9039a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388433056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404191504) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355467968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5ce50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42d240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112236680>''' +___dict_contains = '''. 
at 0x11debeef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388433056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404191504) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355467968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py index df3f442d..3b63da58 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a32dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -95,8 +105,8 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111178430>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370b1750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -111,41 +121,51 @@ def transformed___resume_at_38_3(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -154,6 +174,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -190,8 +212,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a32dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -206,41 +228,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -283,8 +315,8 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111178430>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370b1750>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -299,41 +331,51 @@ def transformed___resume_at_38_9(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -342,6 +384,8 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try 
torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. @@ -378,8 +422,8 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11111aa70>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370479a0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -394,41 +438,51 @@ def transformed___resume_at_30_8(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -437,8 +491,10 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391431328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395753744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350241472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373764096) + __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / 
(torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) + __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. @@ -463,8 +519,8 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4ce50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a31240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -479,41 +535,51 @@ def __transformed_code_1_for_forward(self, a, b): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11043e680>''' +___dict_contains = '''. 
at 0x1363baef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -522,8 +588,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391431328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395753744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350241472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373764096) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
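The guards above point at torch._dynamo.mark_unbacked(tensor, dim) whenever a "2 <= L['a'].size()[0]" style guard comes from 0/1 specialization rather than from user code. A minimal sketch of that suggestion follows, assuming a PyTorch build that exposes torch._dynamo.mark_unbacked; the function f and tensor x are illustrative names, not part of the test suite.

import torch

def f(x):
    # Mirrors the pattern in the guarded user code shown above: x / (abs(x) + 1).
    return x / (torch.abs(x) + 1)

compiled = torch.compile(f, backend="eager")
x = torch.randn(10)

# Marking dim 0 as unbacked asks Dynamo not to specialize on sizes 0/1,
# so no "2 <= x.size()[0]" guard is installed for this dimension.
torch._dynamo.mark_unbacked(x, 0)
out = compiled(x)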
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to 
tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from 
tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py index 512666c8..bad5144e 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x148f6d630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2b60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487ecb80>''' +___dict_contains = '''. at 0x11d36f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x148efd360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d215990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487ecb80>''' +___dict_contains = '''. at 0x11d36f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355566752) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4399997200) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359580352) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417804288) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
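The ___check_obj_id guards in these fixtures compare raw CPython object ids, which is why the integer constants (and the 0x... addresses embedded in the dumped reprs) differ between the old and new files whenever the fixtures are regenerated in a new process. A rough sketch of the identity check they perform, assuming behaviour equivalent to an id() comparison; check_obj_id below is an illustrative stand-in, not the Dynamo implementation.

import torch

def check_obj_id(obj, expected_id):
    # Passes only when obj is literally the same object seen at trace time.
    return id(obj) == expected_id

torch_id = id(torch)  # process-specific, hence the churn in the fixture constants
assert check_obj_id(torch, torch_id)
assert check_obj_id(torch.abs, id(torch.abs))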
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 5bb30e92..c9d22ebf 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cd69630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x142bb60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bdacb80>''' +___dict_contains = '''. at 0x11fb6f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cd3d360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x142b0d990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12bdacb80>''' +___dict_contains = '''. at 0x11fb6f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325420192) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831376) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4383255472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4461845440) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 3daa7a59..a6d5b6a9 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f46d630>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1141b60e0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12dcecb80>''' +___dict_contains = '''. at 0x11326f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f3fd360>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x114105990>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12dcecb80>''' +___dict_contains = '''. at 0x11326f880>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389924000) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4411531536) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397673152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4401158144) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py similarity index 100% rename from 
tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py similarity index 100% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py index eb029876..a05d45aa 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127d3d240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133532dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12563a680>''' +___dict_contains = '''. at 0x1327beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127d3ce50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133531240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12563a680>''' +___dict_contains = '''. at 0x1327beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391611152) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4578254320) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322356144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375862608) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py index 2bde9abb..b24c4798 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12db65240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128d32dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d13a680>''' +___dict_contains = '''. at 0x11f7beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12db64e50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128d31240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d13a680>''' +___dict_contains = '''. at 0x11f7beef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4369083232) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4701986448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389006336) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420951056) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py index f363c215..b6f01413 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125165240>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11bc36dd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -18,41 +18,51 @@ FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12462e680>''' +___dict_contains = '''. at 0x11adbaef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -108,8 +118,8 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125164e50>''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11bc35240>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -124,41 +134,51 @@ def transformed___resume_at_30_2(b, x): FloorDiv = '''''' TrueDiv = '''''' PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' FloatTrueDiv = '''''' ToFloat = '''''' +OpaqueUnaryFn_cos = '''''' +OpaqueUnaryFn_cosh = '''''' +OpaqueUnaryFn_acos = '''''' +OpaqueUnaryFn_sin = '''''' +OpaqueUnaryFn_sinh = '''''' +OpaqueUnaryFn_asin = '''''' +OpaqueUnaryFn_tan = '''''' +OpaqueUnaryFn_tanh = '''''' +OpaqueUnaryFn_atan = '''''' +OpaqueUnaryFn_sqrt = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12462e680>''' +___dict_contains = '''. at 0x11adbaef0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''None''' +__numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -167,8 +187,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4348963280) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563574496) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356991920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4360494256) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py
similarity index 100%
rename from tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py
rename to tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py
diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
similarity index 100%
rename from tests/depyf_output/multiprocessing/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py
rename to tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_0.py b/tests/depyf_output/multiprocessing/full_code_for_f_0.py
index a95fd45d..b78a1e4a 100644
--- a/tests/depyf_output/multiprocessing/full_code_for_f_0.py
+++ b/tests/depyf_output/multiprocessing/full_code_for_f_0.py
@@ -2,8 +2,8 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123441bd0>'''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fba7880>'''
 Abs = ''''''
 Eq = ''''''
 Ne = ''''''
@@ -18,41 +18,51 @@
 FloorDiv = ''''''
 TrueDiv = ''''''
 PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
+IsNonOverlappingAndDenseIndicator = ''''''
 floor = ''''''
 ceiling = ''''''
 FloorToInt = ''''''
 FloatPow = ''''''
 CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
+cast_symbool_to_symint_guardless = ''''''
 RoundToInt = ''''''
 RoundDecimal = ''''''
 TruncToInt = ''''''
 IntTrueDiv = ''''''
 FloatTrueDiv = ''''''
 ToFloat = ''''''
+OpaqueUnaryFn_cos = ''''''
+OpaqueUnaryFn_cosh = ''''''
+OpaqueUnaryFn_acos = ''''''
+OpaqueUnaryFn_sin = ''''''
+OpaqueUnaryFn_sinh = ''''''
+OpaqueUnaryFn_asin = ''''''
+OpaqueUnaryFn_tan = ''''''
+OpaqueUnaryFn_tanh = ''''''
+OpaqueUnaryFn_atan = ''''''
+OpaqueUnaryFn_sqrt = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x1232f5240>'''
+___dict_contains = '''. at 0x11fb769e0>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
-__numpy_isnan = '''None'''
+__numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
+__load_module = ''''''
 utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
 torch = ''''''
 inspect = ''''''
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:479 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:483 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_1.py b/tests/depyf_output/multiprocessing/full_code_for_f_1.py
index 788edff6..1cbb4306 100644
--- a/tests/depyf_output/multiprocessing/full_code_for_f_1.py
+++ b/tests/depyf_output/multiprocessing/full_code_for_f_1.py
@@ -2,8 +2,8 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x126841bd0>'''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1224a7880>'''
 Abs = ''''''
 Eq = ''''''
 Ne = ''''''
@@ -18,41 +18,51 @@
 FloorDiv = ''''''
 TrueDiv = ''''''
 PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
+IsNonOverlappingAndDenseIndicator = ''''''
 floor = ''''''
 ceiling = ''''''
 FloorToInt = ''''''
 FloatPow = ''''''
 CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
+cast_symbool_to_symint_guardless = ''''''
 RoundToInt = ''''''
 RoundDecimal = ''''''
 TruncToInt = ''''''
 IntTrueDiv = ''''''
 FloatTrueDiv = ''''''
 ToFloat = ''''''
+OpaqueUnaryFn_cos = ''''''
+OpaqueUnaryFn_cosh = ''''''
+OpaqueUnaryFn_acos = ''''''
+OpaqueUnaryFn_sin = ''''''
+OpaqueUnaryFn_sinh = ''''''
+OpaqueUnaryFn_asin = ''''''
+OpaqueUnaryFn_tan = ''''''
+OpaqueUnaryFn_tanh = ''''''
+OpaqueUnaryFn_atan = ''''''
+OpaqueUnaryFn_sqrt = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x1267f1240>'''
+___dict_contains = '''. at 0x1224769e0>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
-__numpy_isnan = '''None'''
+__numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
+__load_module = ''''''
 utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
 torch = ''''''
 inspect = ''''''
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:479 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:483 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_2.py b/tests/depyf_output/multiprocessing/full_code_for_f_2.py
index 620cd9fa..f7cffe52 100644
--- a/tests/depyf_output/multiprocessing/full_code_for_f_2.py
+++ b/tests/depyf_output/multiprocessing/full_code_for_f_2.py
@@ -2,8 +2,8 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120a41bd0>'''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f5a7880>'''
 Abs = ''''''
 Eq = ''''''
 Ne = ''''''
@@ -18,41 +18,51 @@
 FloorDiv = ''''''
 TrueDiv = ''''''
 PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
+IsNonOverlappingAndDenseIndicator = ''''''
 floor = ''''''
 ceiling = ''''''
 FloorToInt = ''''''
 FloatPow = ''''''
 CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
+cast_symbool_to_symint_guardless = ''''''
 RoundToInt = ''''''
 RoundDecimal = ''''''
 TruncToInt = ''''''
 IntTrueDiv = ''''''
 FloatTrueDiv = ''''''
 ToFloat = ''''''
+OpaqueUnaryFn_cos = ''''''
+OpaqueUnaryFn_cosh = ''''''
+OpaqueUnaryFn_acos = ''''''
+OpaqueUnaryFn_sin = ''''''
+OpaqueUnaryFn_sinh = ''''''
+OpaqueUnaryFn_asin = ''''''
+OpaqueUnaryFn_tan = ''''''
+OpaqueUnaryFn_tanh = ''''''
+OpaqueUnaryFn_atan = ''''''
+OpaqueUnaryFn_sqrt = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x1209f1240>'''
+___dict_contains = '''. at 0x10f5769e0>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
-__numpy_isnan = '''None'''
+__numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
+__load_module = ''''''
 utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
 torch = ''''''
 inspect = ''''''
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:479 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None  # _dynamo/output_graph.py:483 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])