diff --git a/_downloads/3195443a0ced3cabc0ad643537bdb5cd/introyt1_tutorial.ipynb b/_downloads/3195443a0ced3cabc0ad643537bdb5cd/introyt1_tutorial.ipynb index c7aa404408..885ca957fe 100644 --- a/_downloads/3195443a0ced3cabc0ad643537bdb5cd/introyt1_tutorial.ipynb +++ b/_downloads/3195443a0ced3cabc0ad643537bdb5cd/introyt1_tutorial.ipynb @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c338aa9a", + "id": "c9887eb2", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "1425faf3", + "id": "97e989fc", "metadata": {}, "source": [ "\n", diff --git a/_downloads/4355e2cef7d17548f1e25f97a62828c4/template_tutorial.ipynb b/_downloads/4355e2cef7d17548f1e25f97a62828c4/template_tutorial.ipynb index 7d8a175ead..b6e0b42e38 100644 --- a/_downloads/4355e2cef7d17548f1e25f97a62828c4/template_tutorial.ipynb +++ b/_downloads/4355e2cef7d17548f1e25f97a62828c4/template_tutorial.ipynb @@ -31,7 +31,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bf671db4", + "id": "693f57f4", "metadata": {}, "outputs": [], "source": [ @@ -47,7 +47,7 @@ }, { "cell_type": "markdown", - "id": "0bef4b2d", + "id": "f05af391", "metadata": {}, "source": [ "\n", diff --git a/_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb b/_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb index af164d707f..22056eaf5b 100644 --- a/_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb +++ b/_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "511ca8b5", + "id": "7bf7fbaa", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "c43f4f6d", + "id": "7369a4bc", "metadata": {}, "source": [ "\n", diff --git a/_downloads/770632dd3941d2a51b831c52ded57aa2/trainingyt.ipynb b/_downloads/770632dd3941d2a51b831c52ded57aa2/trainingyt.ipynb index 4fd79cb7c7..289d17905e 100644 --- a/_downloads/770632dd3941d2a51b831c52ded57aa2/trainingyt.ipynb +++ b/_downloads/770632dd3941d2a51b831c52ded57aa2/trainingyt.ipynb @@ -35,7 +35,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b4d1f47d", + "id": "752a2985", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "cb97e0b6", + "id": "34368a58", "metadata": {}, "source": [ "\n", diff --git a/_downloads/c28f42852d456daf9af72da6c6909556/captumyt.ipynb b/_downloads/c28f42852d456daf9af72da6c6909556/captumyt.ipynb index 07623ee36b..161161f908 100644 --- a/_downloads/c28f42852d456daf9af72da6c6909556/captumyt.ipynb +++ b/_downloads/c28f42852d456daf9af72da6c6909556/captumyt.ipynb @@ -37,7 +37,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dccff559", + "id": "453759d9", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ }, { "cell_type": "markdown", - "id": "c9b546a5", + "id": "c7acdec3", "metadata": {}, "source": [ "\n", diff --git a/_downloads/e2e556f6b4693c2cef716dd7f40caaf6/tensorboardyt_tutorial.ipynb b/_downloads/e2e556f6b4693c2cef716dd7f40caaf6/tensorboardyt_tutorial.ipynb index 70563bf3e5..f289e3b056 100644 --- a/_downloads/e2e556f6b4693c2cef716dd7f40caaf6/tensorboardyt_tutorial.ipynb +++ b/_downloads/e2e556f6b4693c2cef716dd7f40caaf6/tensorboardyt_tutorial.ipynb @@ -35,7 +35,7 @@ { "cell_type": "code", "execution_count": null, - "id": "895519f4", + "id": "d42cde5b", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +51,7 @@ }, { "cell_type": "markdown", - "id": 
"32e7ef55", + "id": "69464519", "metadata": {}, "source": [ "\n", diff --git a/_downloads/ed9d4f94afb79f7dada6742a06c486a5/autogradyt_tutorial.ipynb b/_downloads/ed9d4f94afb79f7dada6742a06c486a5/autogradyt_tutorial.ipynb index 4605a928de..5f4c154026 100644 --- a/_downloads/ed9d4f94afb79f7dada6742a06c486a5/autogradyt_tutorial.ipynb +++ b/_downloads/ed9d4f94afb79f7dada6742a06c486a5/autogradyt_tutorial.ipynb @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0e2fd20a", + "id": "05d049be", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "496e5d03", + "id": "2a29a41c", "metadata": {}, "source": [ "\n", diff --git a/_downloads/fe726e041160526cf828806536922cf6/modelsyt_tutorial.ipynb b/_downloads/fe726e041160526cf828806536922cf6/modelsyt_tutorial.ipynb index e05ba5a108..7c90c66f87 100644 --- a/_downloads/fe726e041160526cf828806536922cf6/modelsyt_tutorial.ipynb +++ b/_downloads/fe726e041160526cf828806536922cf6/modelsyt_tutorial.ipynb @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55e9f783", + "id": "ffaf9618", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "ba1af94e", + "id": "cb906f49", "metadata": {}, "source": [ "\n", diff --git a/_images/sphx_glr_coding_ddpg_001.png b/_images/sphx_glr_coding_ddpg_001.png index 6aec7f2e32..1f1594b1a9 100644 Binary files a/_images/sphx_glr_coding_ddpg_001.png and b/_images/sphx_glr_coding_ddpg_001.png differ diff --git a/_images/sphx_glr_dqn_with_rnn_tutorial_001.png b/_images/sphx_glr_dqn_with_rnn_tutorial_001.png index 3b7be9d036..83fc03dbeb 100644 Binary files a/_images/sphx_glr_dqn_with_rnn_tutorial_001.png and b/_images/sphx_glr_dqn_with_rnn_tutorial_001.png differ diff --git a/_images/sphx_glr_neural_style_tutorial_004.png b/_images/sphx_glr_neural_style_tutorial_004.png index 7e19916806..20f6fff2c5 100644 Binary files a/_images/sphx_glr_neural_style_tutorial_004.png and b/_images/sphx_glr_neural_style_tutorial_004.png differ diff --git a/_images/sphx_glr_reinforcement_ppo_001.png b/_images/sphx_glr_reinforcement_ppo_001.png index 1890c4dd57..c1125b9625 100644 Binary files a/_images/sphx_glr_reinforcement_ppo_001.png and b/_images/sphx_glr_reinforcement_ppo_001.png differ diff --git a/_images/sphx_glr_reinforcement_q_learning_001.png b/_images/sphx_glr_reinforcement_q_learning_001.png index 3dfa99a329..023132b167 100644 Binary files a/_images/sphx_glr_reinforcement_q_learning_001.png and b/_images/sphx_glr_reinforcement_q_learning_001.png differ diff --git a/_images/sphx_glr_spatial_transformer_tutorial_001.png b/_images/sphx_glr_spatial_transformer_tutorial_001.png index 3798f9b6e6..75a9125077 100644 Binary files a/_images/sphx_glr_spatial_transformer_tutorial_001.png and b/_images/sphx_glr_spatial_transformer_tutorial_001.png differ diff --git a/_images/sphx_glr_torchvision_tutorial_002.png b/_images/sphx_glr_torchvision_tutorial_002.png index 9cb6348913..157fabad90 100644 Binary files a/_images/sphx_glr_torchvision_tutorial_002.png and b/_images/sphx_glr_torchvision_tutorial_002.png differ diff --git a/_sources/advanced/coding_ddpg.rst.txt b/_sources/advanced/coding_ddpg.rst.txt index d50c41d914..3a2fa3db0c 100644 --- a/_sources/advanced/coding_ddpg.rst.txt +++ b/_sources/advanced/coding_ddpg.rst.txt @@ -1632,26 +1632,26 @@ modules we need. 0%| | 0/10000 [00:00 + @@ -513,165 +513,160 @@ up our dataset. 
Downloading tokenizer_config.json: 0%| | 0.00/49.0 [00:00 @@ -1496,23 +1491,23 @@ zero-shot pruning, or pruning without fine-tuning / retraining. 0%| | 0/43 [00:00 + @@ -806,7 +806,7 @@ https://colab.research.google.com/drive/1HiICg6jRkBnr5hvK2-VnMi88Vi9pUzEJ .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.216 seconds) + **Total running time of the script:** ( 0 minutes 0.224 seconds) .. _sphx_glr_download_beginner_Intro_to_TorchScript_tutorial.py: diff --git a/_sources/beginner/basics/autogradqs_tutorial.rst.txt b/_sources/beginner/basics/autogradqs_tutorial.rst.txt index 8ff63f83c5..85612544e4 100644 --- a/_sources/beginner/basics/autogradqs_tutorial.rst.txt +++ b/_sources/beginner/basics/autogradqs_tutorial.rst.txt @@ -113,8 +113,8 @@ documentation `__. .. code-block:: none - Gradient function for z = - Gradient function for loss = + Gradient function for z = + Gradient function for loss = @@ -395,7 +395,7 @@ Further Reading .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.013 seconds) + **Total running time of the script:** ( 0 minutes 0.011 seconds) .. _sphx_glr_download_beginner_basics_autogradqs_tutorial.py: diff --git a/_sources/beginner/basics/buildmodel_tutorial.rst.txt b/_sources/beginner/basics/buildmodel_tutorial.rst.txt index 17f8150237..81bc61c8cf 100644 --- a/_sources/beginner/basics/buildmodel_tutorial.rst.txt +++ b/_sources/beginner/basics/buildmodel_tutorial.rst.txt @@ -482,7 +482,7 @@ Further Reading .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.314 seconds) + **Total running time of the script:** ( 0 minutes 0.409 seconds) .. _sphx_glr_download_beginner_basics_buildmodel_tutorial.py: diff --git a/_sources/beginner/basics/data_tutorial.rst.txt b/_sources/beginner/basics/data_tutorial.rst.txt index 70427119f3..cf6ca82967 100644 --- a/_sources/beginner/basics/data_tutorial.rst.txt +++ b/_sources/beginner/basics/data_tutorial.rst.txt @@ -103,45 +103,56 @@ We load the `FashionMNIST Dataset `_. Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to data/FashionMNIST/raw/train-images-idx3-ubyte.gz 0%| | 0/26421880 [00:00`_. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 1 minutes 10.135 seconds) + **Total running time of the script:** ( 1 minutes 10.633 seconds) .. _sphx_glr_download_beginner_basics_quickstart_tutorial.py: diff --git a/_sources/beginner/basics/saveloadrun_tutorial.rst.txt b/_sources/beginner/basics/saveloadrun_tutorial.rst.txt index 899f5e8f9c..6b871e241b 100644 --- a/_sources/beginner/basics/saveloadrun_tutorial.rst.txt +++ b/_sources/beginner/basics/saveloadrun_tutorial.rst.txt @@ -76,37 +76,37 @@ method: Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /var/lib/ci-user/.cache/torch/hub/checkpoints/vgg16-397923af.pth 0%| | 0.00/528M [00:00 - - + + + @@ -436,7 +436,7 @@ implements all these methods. Using it is very simple: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.067 seconds) + **Total running time of the script:** ( 0 minutes 0.065 seconds) .. 
_sphx_glr_download_beginner_blitz_neural_networks_tutorial.py: diff --git a/_sources/beginner/blitz/tensor_tutorial.rst.txt b/_sources/beginner/blitz/tensor_tutorial.rst.txt index aea54e33df..91709a6c9d 100644 --- a/_sources/beginner/blitz/tensor_tutorial.rst.txt +++ b/_sources/beginner/blitz/tensor_tutorial.rst.txt @@ -555,7 +555,7 @@ Changes in the NumPy array reflects in the tensor. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.037 seconds) + **Total running time of the script:** ( 0 minutes 0.035 seconds) .. _sphx_glr_download_beginner_blitz_tensor_tutorial.py: diff --git a/_sources/beginner/chatbot_tutorial.rst.txt b/_sources/beginner/chatbot_tutorial.rst.txt index fdf270d6fa..905334820d 100644 --- a/_sources/beginner/chatbot_tutorial.rst.txt +++ b/_sources/beginner/chatbot_tutorial.rst.txt @@ -5767,7 +5767,7 @@ in PyTorch! .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 5 minutes 53.802 seconds) + **Total running time of the script:** ( 6 minutes 3.924 seconds) .. _sphx_glr_download_beginner_chatbot_tutorial.py: diff --git a/_sources/beginner/data_loading_tutorial.rst.txt b/_sources/beginner/data_loading_tutorial.rst.txt index 5bca72d958..10da37c803 100644 --- a/_sources/beginner/data_loading_tutorial.rst.txt +++ b/_sources/beginner/data_loading_tutorial.rst.txt @@ -63,7 +63,7 @@ installed: .. code-block:: none - + @@ -661,7 +661,7 @@ For an example with training code, please see .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.751 seconds) + **Total running time of the script:** ( 0 minutes 2.960 seconds) .. _sphx_glr_download_beginner_data_loading_tutorial.py: diff --git a/_sources/beginner/dcgan_faces_tutorial.rst.txt b/_sources/beginner/dcgan_faces_tutorial.rst.txt index 6b735dce3f..15bc0d4616 100644 --- a/_sources/beginner/dcgan_faces_tutorial.rst.txt +++ b/_sources/beginner/dcgan_faces_tutorial.rst.txt @@ -1284,42 +1284,42 @@ animation.
- + oninput="anim2c476523c4c64710b35ecba471b1458a.set_frame(parseInt(this.value));">
@@ -1329,9 +1329,9 @@ animation. /* Instantiate the Animation class. */ /* The IDs given should match those used in the template above. */ (function() { - var img_id = "_anim_img7c63fcf7433f420ea5c7b4e4ae5eee02"; - var slider_id = "_anim_slider7c63fcf7433f420ea5c7b4e4ae5eee02"; - var loop_select_id = "_anim_loop_select7c63fcf7433f420ea5c7b4e4ae5eee02"; + var img_id = "_anim_img2c476523c4c64710b35ecba471b1458a"; + var slider_id = "_anim_slider2c476523c4c64710b35ecba471b1458a"; + var loop_select_id = "_anim_loop_select2c476523c4c64710b35ecba471b1458a"; var frames = new Array(17); frames[0] = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyAAAAMgCAYAAADbcAZoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90\ @@ -233662,7 +233662,7 @@ animation. /* set a timeout to make sure all the above elements are created before the object is initialized. */ setTimeout(function() { - anim7c63fcf7433f420ea5c7b4e4ae5eee02 = new Animation(frames, img_id, slider_id, 1000.0, + anim2c476523c4c64710b35ecba471b1458a = new Animation(frames, img_id, slider_id, 1000.0, loop_select_id); }, 0); })() @@ -233735,7 +233735,7 @@ could go from here. You could: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 7 minutes 1.778 seconds) + **Total running time of the script:** ( 6 minutes 55.802 seconds) .. _sphx_glr_download_beginner_dcgan_faces_tutorial.py: diff --git a/_sources/beginner/deploy_seq2seq_hybrid_frontend_tutorial.rst.txt b/_sources/beginner/deploy_seq2seq_hybrid_frontend_tutorial.rst.txt index 0a8259ac5f..182d5c16ad 100644 --- a/_sources/beginner/deploy_seq2seq_hybrid_frontend_tutorial.rst.txt +++ b/_sources/beginner/deploy_seq2seq_hybrid_frontend_tutorial.rst.txt @@ -1135,7 +1135,7 @@ of torch.save(model, PATH). .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.722 seconds) + **Total running time of the script:** ( 0 minutes 0.753 seconds) .. _sphx_glr_download_beginner_deploy_seq2seq_hybrid_frontend_tutorial.py: diff --git a/_sources/beginner/examples_nn/polynomial_nn.rst.txt b/_sources/beginner/examples_nn/polynomial_nn.rst.txt index 7b34166c82..0ef039810b 100644 --- a/_sources/beginner/examples_nn/polynomial_nn.rst.txt +++ b/_sources/beginner/examples_nn/polynomial_nn.rst.txt @@ -144,7 +144,7 @@ input and may have some trainable weights. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.870 seconds) + **Total running time of the script:** ( 0 minutes 0.791 seconds) .. _sphx_glr_download_beginner_examples_nn_polynomial_nn.py: diff --git a/_sources/beginner/examples_tensor/polynomial_numpy.rst.txt b/_sources/beginner/examples_tensor/polynomial_numpy.rst.txt index d83fc833fb..d76718281c 100644 --- a/_sources/beginner/examples_tensor/polynomial_numpy.rst.txt +++ b/_sources/beginner/examples_tensor/polynomial_numpy.rst.txt @@ -113,7 +113,7 @@ generic numeric computations. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.357 seconds) + **Total running time of the script:** ( 0 minutes 0.348 seconds) .. _sphx_glr_download_beginner_examples_tensor_polynomial_numpy.py: diff --git a/_sources/beginner/examples_tensor/polynomial_tensor.rst.txt b/_sources/beginner/examples_tensor/polynomial_tensor.rst.txt index b43393ef16..a641416bf2 100644 --- a/_sources/beginner/examples_tensor/polynomial_tensor.rst.txt +++ b/_sources/beginner/examples_tensor/polynomial_tensor.rst.txt @@ -123,7 +123,7 @@ just cast the Tensor to a cuda datatype. .. 
rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.356 seconds) + **Total running time of the script:** ( 0 minutes 0.326 seconds) .. _sphx_glr_download_beginner_examples_tensor_polynomial_tensor.py: diff --git a/_sources/beginner/fgsm_tutorial.rst.txt b/_sources/beginner/fgsm_tutorial.rst.txt index f5b68525f7..c4a04caa3b 100644 --- a/_sources/beginner/fgsm_tutorial.rst.txt +++ b/_sources/beginner/fgsm_tutorial.rst.txt @@ -180,7 +180,7 @@ follows: .. code-block:: none - + @@ -267,7 +267,7 @@ pretrained weights. Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ../data/MNIST/raw/train-images-idx3-ubyte.gz 0%| | 0/9912422 [00:00\n', 'nokia\n', 'ec\n', 'virgin\n', '2011\n'] Downloading builder script: 0%| | 0.00/5.02k [00:00] + [] @@ -363,17 +363,17 @@ to the function with no history of its own. .. code-block:: none d: - - ((, 0), (None, 0)) - ((, 0), (None, 0)) - ((, 0),) + + ((, 0), (None, 0)) + ((, 0), (None, 0)) + ((, 0),) () c: - + b: - + a: None @@ -417,7 +417,7 @@ call the ``backward()`` method on the output, and check the input’s -1.4142e+00, -1.0000e+00, -5.1764e-01, 2.3850e-08, 5.1764e-01, 1.0000e+00, 1.4142e+00, 1.7321e+00, 1.9319e+00, 2.0000e+00]) - [] + [] @@ -928,17 +928,17 @@ example usage: ------------------------------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ Name Self CPU % Self CPU CPU total % CPU total CPU time avg Self CUDA Self CUDA % CUDA total CUDA time avg # of Calls ------------------------------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ - cudaEventRecord 45.49% 9.176ms 45.49% 9.176ms 2.294us 0.000us 0.00% 0.000us 0.000us 4000 - aten::div 27.78% 5.604ms 27.78% 5.604ms 5.604us 16.214ms 50.47% 16.214ms 16.214us 1000 - aten::mul 25.75% 5.194ms 25.75% 5.194ms 5.194us 15.909ms 49.53% 15.909ms 15.909us 1000 - cudaGetDeviceProperties_v2 0.86% 174.000us 0.86% 174.000us 174.000us 0.000us 0.00% 0.000us 0.000us 1 - cudaDeviceSynchronize 0.05% 11.000us 0.05% 11.000us 11.000us 0.000us 0.00% 0.000us 0.000us 1 + cudaEventRecord 44.91% 9.594ms 44.91% 9.594ms 2.398us 0.000us 0.00% 0.000us 0.000us 4000 + aten::div 28.08% 5.998ms 28.08% 5.998ms 5.998us 16.504ms 50.27% 16.504ms 16.504us 1000 + aten::mul 26.05% 5.565ms 26.05% 5.565ms 5.565us 16.325ms 49.73% 16.325ms 16.325us 1000 + cudaGetDeviceProperties_v2 0.84% 179.000us 0.84% 179.000us 179.000us 0.000us 0.00% 0.000us 0.000us 1 + cudaDeviceSynchronize 0.07% 14.000us 0.07% 14.000us 14.000us 0.000us 0.00% 0.000us 0.000us 1 cudaStreamIsCapturing 0.04% 8.000us 0.04% 8.000us 2.000us 0.000us 0.00% 0.000us 0.000us 4 - cudaDeviceGetStreamPriorityRange 0.01% 3.000us 0.01% 3.000us 3.000us 0.000us 0.00% 0.000us 0.000us 1 + cudaDeviceGetStreamPriorityRange 0.02% 4.000us 0.02% 4.000us 4.000us 0.000us 0.00% 0.000us 0.000us 1 cudaGetDeviceCount 0.01% 2.000us 0.01% 2.000us 1.000us 0.000us 0.00% 0.000us 0.000us 2 ------------------------------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ - Self CPU time total: 20.172ms - Self CUDA time total: 32.123ms + Self CPU time total: 21.364ms + Self CUDA time total: 32.829ms @@ -1214,7 +1214,7 @@ API ` .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.770 seconds) + **Total running time of the script:** ( 0 minutes 0.800 seconds) .. 
_sphx_glr_download_beginner_introyt_autogradyt_tutorial.py: diff --git a/_sources/beginner/introyt/introyt1_tutorial.rst.txt b/_sources/beginner/introyt/introyt1_tutorial.rst.txt index 6a9fd5e903..feff809eb0 100644 --- a/_sources/beginner/introyt/introyt1_tutorial.rst.txt +++ b/_sources/beginner/introyt/introyt1_tutorial.rst.txt @@ -598,35 +598,23 @@ automobile, ship, truck): Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz 0%| | 0/170498071 [00:00 - tensor([[1.1645e-42, 5.0000e-01, 0.0000e+00, 0.0000e+00], - [5.5173e-40, 0.0000e+00, 1.7937e-40, 0.0000e+00], - [1.4132e-28, 0.0000e+00, 2.1667e-34, 4.6243e-44]]) + tensor([[-1.1307e-21, 4.5810e-41, 6.2454e+03, 0.0000e+00], + [ 2.2589e-37, 0.0000e+00, 0.0000e+00, 0.0000e+00], + [ 1.1645e-42, 5.0000e-01, 0.0000e+00, 0.0000e+00]]) @@ -271,17 +271,17 @@ have the ``torch.*_like()`` methods: .. code-block:: none torch.Size([2, 2, 3]) - tensor([[[7.4917e-11, 0.0000e+00, 1.7385e-31], - [0.0000e+00, 1.3118e-10, 0.0000e+00]], + tensor([[[2.9724e-31, 0.0000e+00, 3.0504e-31], + [0.0000e+00, 1.1210e-43, 0.0000e+00]], - [[1.6669e-06, 4.5723e-41, 1.3118e-10], - [0.0000e+00, 9.1477e-41, 0.0000e+00]]]) + [[8.9683e-44, 0.0000e+00, 8.1122e+00], + [0.0000e+00, 0.0000e+00, 0.0000e+00]]]) torch.Size([2, 2, 3]) - tensor([[[-6.0079e-36, 4.5723e-41, 5.0333e-13], - [ 0.0000e+00, 4.4842e-44, 0.0000e+00]], + tensor([[[2.9724e-31, 0.0000e+00, 0.0000e+00], + [1.4013e-45, 0.0000e+00, 0.0000e+00]], - [[ 4.4842e-44, 0.0000e+00, 0.0000e+00], - [ 0.0000e+00, 0.0000e+00, 0.0000e+00]]]) + [[1.5835e-43, 0.0000e+00, 8.1122e+00], + [0.0000e+00, 1.7808e-06, 0.0000e+00]]]) torch.Size([2, 2, 3]) tensor([[[0., 0., 0.], [0., 0., 0.]], @@ -1777,7 +1777,7 @@ are reflected in the other: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.127 seconds) + **Total running time of the script:** ( 0 minutes 0.142 seconds) .. _sphx_glr_download_beginner_introyt_tensors_deeper_tutorial.py: diff --git a/_sources/beginner/introyt/trainingyt.rst.txt b/_sources/beginner/introyt/trainingyt.rst.txt index 6055a3a9a8..20b14f2c56 100644 --- a/_sources/beginner/introyt/trainingyt.rst.txt +++ b/_sources/beginner/introyt/trainingyt.rst.txt @@ -127,64 +127,47 @@ and download both training and validation data splits. Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to ./data/FashionMNIST/raw/train-images-idx3-ubyte.gz 0%| | 0/26421880 [00:00 + @@ -458,7 +458,7 @@ Pick up some real data and do a comparison! .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 10.531 seconds) + **Total running time of the script:** ( 0 minutes 11.885 seconds) .. _sphx_glr_download_beginner_nlp_advanced_tutorial.py: diff --git a/_sources/beginner/nlp/deep_learning_tutorial.rst.txt b/_sources/beginner/nlp/deep_learning_tutorial.rst.txt index 2bbae52ccb..036fd8305f 100644 --- a/_sources/beginner/nlp/deep_learning_tutorial.rst.txt +++ b/_sources/beginner/nlp/deep_learning_tutorial.rst.txt @@ -73,7 +73,7 @@ output below is the mapping of the :math:`i`'th row of the input under .. code-block:: none - + @@ -561,7 +561,7 @@ has to offer. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.245 seconds) + **Total running time of the script:** ( 0 minutes 0.237 seconds) .. 
_sphx_glr_download_beginner_nlp_deep_learning_tutorial.py: diff --git a/_sources/beginner/nlp/pytorch_tutorial.rst.txt b/_sources/beginner/nlp/pytorch_tutorial.rst.txt index b15c955fd9..70ebfe471b 100644 --- a/_sources/beginner/nlp/pytorch_tutorial.rst.txt +++ b/_sources/beginner/nlp/pytorch_tutorial.rst.txt @@ -49,7 +49,7 @@ let's look what we can do with tensors. .. code-block:: none - + @@ -386,7 +386,7 @@ created. Let's see it in action. .. code-block:: none tensor([5., 7., 9.], grad_fn=) - + @@ -421,7 +421,7 @@ But how does that help us compute a gradient? .. code-block:: none tensor(21., grad_fn=) - + @@ -536,7 +536,7 @@ successful programmer in deep learning. False False None - + True None @@ -578,7 +578,7 @@ with ``.requires_grad=True`` by wrapping the code block in .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.023 seconds) + **Total running time of the script:** ( 0 minutes 0.024 seconds) .. _sphx_glr_download_beginner_nlp_pytorch_tutorial.py: diff --git a/_sources/beginner/nlp/sequence_models_tutorial.rst.txt b/_sources/beginner/nlp/sequence_models_tutorial.rst.txt index 4c2367c295..e8059c033e 100644 --- a/_sources/beginner/nlp/sequence_models_tutorial.rst.txt +++ b/_sources/beginner/nlp/sequence_models_tutorial.rst.txt @@ -90,7 +90,7 @@ Let's see a quick example. .. code-block:: none - + @@ -376,7 +376,7 @@ this LSTM. Hints: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.817 seconds) + **Total running time of the script:** ( 0 minutes 0.781 seconds) .. _sphx_glr_download_beginner_nlp_sequence_models_tutorial.py: diff --git a/_sources/beginner/nlp/word_embeddings_tutorial.rst.txt b/_sources/beginner/nlp/word_embeddings_tutorial.rst.txt index 9673bfa09a..54e9318ba4 100644 --- a/_sources/beginner/nlp/word_embeddings_tutorial.rst.txt +++ b/_sources/beginner/nlp/word_embeddings_tutorial.rst.txt @@ -195,7 +195,7 @@ indices are integers, not floats). .. code-block:: none - + @@ -343,9 +343,9 @@ examples and update the parameters with backpropagation. .. code-block:: none [(['forty', 'When'], 'winters'), (['winters', 'forty'], 'shall'), (['shall', 'winters'], 'besiege')] - [521.1830098628998, 518.5512182712555, 515.939252614975, 513.3469393253326, 510.77273416519165, 508.215532541275, 505.67491912841797, 503.14874482154846, 500.6369118690491, 498.1383876800537] - tensor([-0.4274, 1.2996, -1.0183, -0.8543, -1.3621, -0.1738, 1.5375, -1.1690, - -1.0065, -1.1262], grad_fn=) + [519.2318260669708, 516.6196131706238, 514.0248794555664, 511.4472498893738, 508.88449597358704, 506.3354563713074, 503.8009901046753, 501.2801191806793, 498.7711420059204, 496.27349042892456] + tensor([ 0.4877, -0.3100, -3.0144, -1.2486, 1.3481, 0.2685, -1.1267, -0.5996, + 1.8354, -1.0729], grad_fn=) @@ -439,14 +439,14 @@ tips: [(['are', 'We', 'to', 'study'], 'about'), (['about', 'are', 'study', 'the'], 'to'), (['to', 'about', 'the', 'idea'], 'study'), (['study', 'to', 'idea', 'of'], 'the'), (['the', 'study', 'of', 'a'], 'idea')] - tensor([27, 38, 0, 10]) + tensor([ 9, 5, 2, 36]) .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.803 seconds) + **Total running time of the script:** ( 0 minutes 0.814 seconds) .. 
_sphx_glr_download_beginner_nlp_word_embeddings_tutorial.py: diff --git a/_sources/beginner/nn_tutorial.rst.txt b/_sources/beginner/nn_tutorial.rst.txt index d55c4f7c81..16af0a248f 100644 --- a/_sources/beginner/nn_tutorial.rst.txt +++ b/_sources/beginner/nn_tutorial.rst.txt @@ -1648,8 +1648,8 @@ You should find it runs faster now: .. code-block:: none - 0 0.18156248867511748 - 1 0.17109674872159958 + 0 0.18240425955057143 + 1 0.16882807456254958 @@ -1692,7 +1692,7 @@ what we've seen: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 36.891 seconds) + **Total running time of the script:** ( 0 minutes 35.594 seconds) .. _sphx_glr_download_beginner_nn_tutorial.py: diff --git a/_sources/beginner/onnx/export_simple_model_to_onnx_tutorial.rst.txt b/_sources/beginner/onnx/export_simple_model_to_onnx_tutorial.rst.txt index bc745827ee..58d824df79 100644 --- a/_sources/beginner/onnx/export_simple_model_to_onnx_tutorial.rst.txt +++ b/_sources/beginner/onnx/export_simple_model_to_onnx_tutorial.rst.txt @@ -344,7 +344,7 @@ sit tight and have fun going through all of them to learn all there is about the .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.844 seconds) + **Total running time of the script:** ( 0 minutes 0.824 seconds) .. _sphx_glr_download_beginner_onnx_export_simple_model_to_onnx_tutorial.py: diff --git a/_sources/beginner/template_tutorial.rst.txt b/_sources/beginner/template_tutorial.rst.txt index 4792d702de..00cb08b985 100644 --- a/_sources/beginner/template_tutorial.rst.txt +++ b/_sources/beginner/template_tutorial.rst.txt @@ -136,7 +136,7 @@ Further Reading .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.020 seconds) + **Total running time of the script:** ( 0 minutes 0.023 seconds) .. _sphx_glr_download_beginner_template_tutorial.py: diff --git a/_sources/beginner/transfer_learning_tutorial.rst.txt b/_sources/beginner/transfer_learning_tutorial.rst.txt index fb638dade3..3c9d04f4e6 100644 --- a/_sources/beginner/transfer_learning_tutorial.rst.txt +++ b/_sources/beginner/transfer_learning_tutorial.rst.txt @@ -80,7 +80,7 @@ These two major transfer learning scenarios look as follows: .. code-block:: none - + @@ -378,8 +378,8 @@ Load a pretrained model and reset final fully connected layer. 0%| | 0.00/44.7M [00:000 0 0_0 - FAILED + COMPLETED Sobol - NaN - NaN - NaN + False + 16810.0 + 0.912261 19 66 0.003182 @@ -1635,7 +1642,7 @@ an easy way to sanity check the optimization. Sobol False 21926.0 - 0.874502 + 0.884336 23 118 0.000145 @@ -1651,7 +1658,7 @@ an easy way to sanity check the optimization. Sobol True 37560.0 - 0.948988 + 0.950008 40 124 0.002745 @@ -1667,7 +1674,7 @@ an easy way to sanity check the optimization. Sobol False 14756.0 - 0.885349 + 0.889388 18 23 0.000166 @@ -1683,7 +1690,7 @@ an easy way to sanity check the optimization. Sobol True 71630.0 - 0.950119 + 0.950269 80 99 0.000642 @@ -1699,7 +1706,7 @@ an easy way to sanity check the optimization. Sobol False 13948.0 - 0.921245 + 0.924378 16 54 0.000444 @@ -1715,7 +1722,7 @@ an easy way to sanity check the optimization. Sobol False 24686.0 - 0.870814 + 0.866699 29 50 0.000177 @@ -1731,7 +1738,7 @@ an easy way to sanity check the optimization. Sobol False 18290.0 - 0.873637 + 0.877507 20 87 0.000119 @@ -1747,7 +1754,7 @@ an easy way to sanity check the optimization. Sobol False 20996.0 - 0.830258 + 0.836515 26 17 0.005245 @@ -1760,16 +1767,16 @@ an easy way to sanity check the optimization. 
9 9_0 COMPLETED - Sobol - False - 95085.0 - 0.955960 - 105 - 111 - 0.002577 - 2 - 0.403652 - 128 + BoTorch + True + 38908.0 + 0.957411 + 42 + 115 + 0.001722 + 3 + 0.129546 + 64 @@ -1820,14 +1827,14 @@ validation accuracy. The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation. - [WARNING 07-10 20:25:30] ax.service.utils.report_utils: Column reason missing for all trials. Not appending column. + [WARNING 07-15 17:11:35] ax.service.utils.report_utils: Column reason missing for all trials. Not appending column. .. raw:: html
@@ -1872,7 +1879,7 @@ much easier to model than the validation accuracy (``val_acc``) metric.
@@ -1905,7 +1912,7 @@ as the hidden sizes increase.
@@ -1934,7 +1941,7 @@ is much larger).
@@ -1951,7 +1958,7 @@ for their help with integrating TorchX with Ax. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 16 minutes 15.239 seconds) + **Total running time of the script:** ( 16 minutes 58.398 seconds) .. _sphx_glr_download_intermediate_ax_multiobjective_nas_tutorial.py: diff --git a/_sources/intermediate/char_rnn_classification_tutorial.rst.txt b/_sources/intermediate/char_rnn_classification_tutorial.rst.txt index 19e7540caf..b713be2b38 100644 --- a/_sources/intermediate/char_rnn_classification_tutorial.rst.txt +++ b/_sources/intermediate/char_rnn_classification_tutorial.rst.txt @@ -595,25 +595,25 @@ average of the loss. .. code-block:: none 5000 5% (0m 34s) 2.2208 Horigome / Japanese ✓ - 10000 10% (1m 8s) 1.6752 Miazga / Japanese ✗ (Polish) - 15000 15% (1m 44s) 0.1778 Yukhvidov / Russian ✓ - 20000 20% (2m 19s) 1.5856 Mclaughlin / Irish ✗ (Scottish) - 25000 25% (2m 54s) 0.6552 Banh / Vietnamese ✓ - 30000 30% (3m 29s) 1.5547 Machado / Japanese ✗ (Portuguese) - 35000 35% (4m 4s) 0.0168 Fotopoulos / Greek ✓ - 40000 40% (4m 39s) 1.1464 Quirke / Irish ✓ - 45000 45% (5m 13s) 1.7532 Reier / French ✗ (German) - 50000 50% (5m 48s) 0.8413 Hou / Chinese ✓ - 55000 55% (6m 23s) 0.8587 Duan / Vietnamese ✗ (Chinese) - 60000 60% (6m 58s) 0.2047 Giang / Vietnamese ✓ - 65000 65% (7m 32s) 2.5534 Cober / French ✗ (Czech) - 70000 70% (8m 7s) 1.5163 Mateus / Arabic ✗ (Portuguese) - 75000 75% (8m 42s) 0.2217 Hamilton / Scottish ✓ - 80000 80% (9m 16s) 0.4456 Maessen / Dutch ✓ - 85000 85% (9m 51s) 0.0239 Gan / Chinese ✓ - 90000 90% (10m 26s) 0.0521 Bellomi / Italian ✓ - 95000 95% (11m 0s) 0.0867 Vozgov / Russian ✓ - 100000 100% (11m 35s) 0.2730 Tong / Vietnamese ✓ + 10000 10% (1m 10s) 1.6752 Miazga / Japanese ✗ (Polish) + 15000 15% (1m 46s) 0.1778 Yukhvidov / Russian ✓ + 20000 20% (2m 21s) 1.5856 Mclaughlin / Irish ✗ (Scottish) + 25000 25% (2m 57s) 0.6552 Banh / Vietnamese ✓ + 30000 30% (3m 32s) 1.5547 Machado / Japanese ✗ (Portuguese) + 35000 35% (4m 8s) 0.0168 Fotopoulos / Greek ✓ + 40000 40% (4m 43s) 1.1464 Quirke / Irish ✓ + 45000 45% (5m 19s) 1.7532 Reier / French ✗ (German) + 50000 50% (5m 54s) 0.8413 Hou / Chinese ✓ + 55000 55% (6m 29s) 0.8587 Duan / Vietnamese ✗ (Chinese) + 60000 60% (7m 5s) 0.2047 Giang / Vietnamese ✓ + 65000 65% (7m 41s) 2.5534 Cober / French ✗ (Czech) + 70000 70% (8m 16s) 1.5163 Mateus / Arabic ✗ (Portuguese) + 75000 75% (8m 51s) 0.2217 Hamilton / Scottish ✓ + 80000 80% (9m 27s) 0.4456 Maessen / Dutch ✓ + 85000 85% (10m 2s) 0.0239 Gan / Chinese ✓ + 90000 90% (10m 37s) 0.0521 Bellomi / Italian ✓ + 95000 95% (11m 13s) 0.0867 Vozgov / Russian ✓ + 100000 100% (11m 49s) 0.2730 Tong / Vietnamese ✓ @@ -653,7 +653,7 @@ learning: .. code-block:: none - [] + [] @@ -844,7 +844,7 @@ Exercises .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 11 minutes 48.189 seconds) + **Total running time of the script:** ( 12 minutes 1.675 seconds) .. _sphx_glr_download_intermediate_char_rnn_classification_tutorial.py: diff --git a/_sources/intermediate/char_rnn_generation_tutorial.rst.txt b/_sources/intermediate/char_rnn_generation_tutorial.rst.txt index 1cdbda9c57..f2f79c38c7 100644 --- a/_sources/intermediate/char_rnn_generation_tutorial.rst.txt +++ b/_sources/intermediate/char_rnn_generation_tutorial.rst.txt @@ -464,26 +464,26 @@ in ``all_losses`` for plotting later. .. 
code-block:: none - 0m 38s (5000 5%) 3.1506 - 1m 17s (10000 10%) 2.5070 - 1m 57s (15000 15%) 3.3047 - 2m 36s (20000 20%) 2.4247 - 3m 16s (25000 25%) 2.6406 - 3m 56s (30000 30%) 2.0266 - 4m 36s (35000 35%) 2.6520 - 5m 15s (40000 40%) 2.4261 - 5m 55s (45000 45%) 2.2302 - 6m 34s (50000 50%) 1.6496 - 7m 13s (55000 55%) 2.7101 - 7m 53s (60000 60%) 2.5396 - 8m 33s (65000 65%) 2.5978 - 9m 12s (70000 70%) 1.6029 - 9m 52s (75000 75%) 0.9634 - 10m 32s (80000 80%) 3.0950 - 11m 11s (85000 85%) 2.0512 - 11m 51s (90000 90%) 2.5302 - 12m 31s (95000 95%) 3.2365 - 13m 11s (100000 100%) 1.7113 + 0m 40s (5000 5%) 3.1506 + 1m 21s (10000 10%) 2.5070 + 2m 2s (15000 15%) 3.3047 + 2m 43s (20000 20%) 2.4247 + 3m 25s (25000 25%) 2.6406 + 4m 6s (30000 30%) 2.0266 + 4m 47s (35000 35%) 2.6520 + 5m 28s (40000 40%) 2.4261 + 6m 13s (45000 45%) 2.2302 + 6m 54s (50000 50%) 1.6496 + 7m 35s (55000 55%) 2.7101 + 8m 17s (60000 60%) 2.5396 + 8m 58s (65000 65%) 2.5978 + 9m 39s (70000 70%) 1.6029 + 10m 20s (75000 75%) 0.9634 + 11m 1s (80000 80%) 3.0950 + 11m 42s (85000 85%) 2.0512 + 12m 23s (90000 90%) 2.5302 + 13m 4s (95000 95%) 3.2365 + 13m 46s (100000 100%) 1.7113 @@ -522,7 +522,7 @@ learning: .. code-block:: none - [] + [] @@ -641,7 +641,7 @@ Exercises .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 13 minutes 11.262 seconds) + **Total running time of the script:** ( 13 minutes 46.615 seconds) .. _sphx_glr_download_intermediate_char_rnn_generation_tutorial.py: diff --git a/_sources/intermediate/custom_function_conv_bn_tutorial.rst.txt b/_sources/intermediate/custom_function_conv_bn_tutorial.rst.txt index 8aef6ab943..8ab4704708 100644 --- a/_sources/intermediate/custom_function_conv_bn_tutorial.rst.txt +++ b/_sources/intermediate/custom_function_conv_bn_tutorial.rst.txt @@ -568,22 +568,22 @@ allocate one fewer buffer per fused ``conv-bn`` pair. 
Test set: Average loss: 0.4197, Accuracy: 8681/10000 (87%) Train Epoch: 0 [0/60000 (0%)] Loss: 2.349030 - Train Epoch: 0 [4096/60000 (7%)] Loss: 7.435157 - Train Epoch: 0 [8192/60000 (13%)] Loss: 5.443536 - Train Epoch: 0 [12288/60000 (20%)] Loss: 2.457860 - Train Epoch: 0 [16384/60000 (27%)] Loss: 1.739214 - Train Epoch: 0 [20480/60000 (33%)] Loss: 1.448293 - Train Epoch: 0 [24576/60000 (40%)] Loss: 1.312150 - Train Epoch: 0 [28672/60000 (47%)] Loss: 1.144980 - Train Epoch: 0 [32768/60000 (53%)] Loss: 1.498813 - Train Epoch: 0 [36864/60000 (60%)] Loss: 1.253350 - Train Epoch: 0 [40960/60000 (67%)] Loss: 1.057278 - Train Epoch: 0 [45056/60000 (73%)] Loss: 0.879628 - Train Epoch: 0 [49152/60000 (80%)] Loss: 0.832816 - Train Epoch: 0 [53248/60000 (87%)] Loss: 0.732052 - Train Epoch: 0 [57344/60000 (93%)] Loss: 0.759647 - - Test set: Average loss: 0.4496, Accuracy: 8660/10000 (87%) + Train Epoch: 0 [4096/60000 (7%)] Loss: 7.435156 + Train Epoch: 0 [8192/60000 (13%)] Loss: 5.443528 + Train Epoch: 0 [12288/60000 (20%)] Loss: 2.457769 + Train Epoch: 0 [16384/60000 (27%)] Loss: 1.739290 + Train Epoch: 0 [20480/60000 (33%)] Loss: 1.450540 + Train Epoch: 0 [24576/60000 (40%)] Loss: 1.314012 + Train Epoch: 0 [28672/60000 (47%)] Loss: 1.135046 + Train Epoch: 0 [32768/60000 (53%)] Loss: 1.407557 + Train Epoch: 0 [36864/60000 (60%)] Loss: 1.204462 + Train Epoch: 0 [40960/60000 (67%)] Loss: 0.926590 + Train Epoch: 0 [45056/60000 (73%)] Loss: 0.843972 + Train Epoch: 0 [49152/60000 (80%)] Loss: 0.890412 + Train Epoch: 0 [53248/60000 (87%)] Loss: 0.801767 + Train Epoch: 0 [57344/60000 (93%)] Loss: 0.709654 + + Test set: Average loss: 0.3548, Accuracy: 9099/10000 (91%) cuDNN version: 8902 @@ -598,7 +598,7 @@ allocate one fewer buffer per fused ``conv-bn`` pair. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 37.909 seconds) + **Total running time of the script:** ( 0 minutes 37.803 seconds) .. _sphx_glr_download_intermediate_custom_function_conv_bn_tutorial.py: diff --git a/_sources/intermediate/dqn_with_rnn_tutorial.rst.txt b/_sources/intermediate/dqn_with_rnn_tutorial.rst.txt index a5b8fff43f..d7ceea5933 100644 --- a/_sources/intermediate/dqn_with_rnn_tutorial.rst.txt +++ b/_sources/intermediate/dqn_with_rnn_tutorial.rst.txt @@ -805,15 +805,15 @@ every 50 data collection, and plot the results after training. 
device=cpu, is_shared=False) - 0%| | 50/1000000 [00:00<2:25:09, 114.81it/s] - 0%| | 50/1000000 [00:11<2:25:09, 114.81it/s] - steps: 16, loss_val: 0.0002, action_spread: tensor([39, 11]): 0%| | 50/1000000 [00:26<2:25:09, 114.81it/s] - steps: 16, loss_val: 0.0002, action_spread: tensor([39, 11]): 0%| | 100/1000000 [00:27<88:41:41, 3.13it/s] - steps: 16, loss_val: 0.0002, action_spread: tensor([ 3, 47]): 0%| | 100/1000000 [00:53<88:41:41, 3.13it/s] - steps: 16, loss_val: 0.0002, action_spread: tensor([ 3, 47]): 0%| | 150/1000000 [00:54<116:05:23, 2.39it/s] - steps: 18, loss_val: 0.0002, action_spread: tensor([38, 12]): 0%| | 150/1000000 [01:20<116:05:23, 2.39it/s] - steps: 18, loss_val: 0.0002, action_spread: tensor([38, 12]): 0%| | 200/1000000 [01:20<128:53:02, 2.15it/s] - steps: 18, loss_val: 0.0004, action_spread: tensor([ 8, 42]): 0%| | 200/1000000 [01:46<128:53:02, 2.15it/s] + 0%| | 50/1000000 [00:00<2:32:54, 109.00it/s] + 0%| | 50/1000000 [00:11<2:32:54, 109.00it/s] + steps: 14, loss_val: 0.0005, action_spread: tensor([ 9, 41]): 0%| | 50/1000000 [00:27<2:32:54, 109.00it/s] + steps: 14, loss_val: 0.0005, action_spread: tensor([ 9, 41]): 0%| | 100/1000000 [00:28<92:52:05, 2.99it/s] + steps: 14, loss_val: 0.0007, action_spread: tensor([45, 5]): 0%| | 100/1000000 [00:55<92:52:05, 2.99it/s] + steps: 14, loss_val: 0.0007, action_spread: tensor([45, 5]): 0%| | 150/1000000 [00:56<121:12:31, 2.29it/s] + steps: 14, loss_val: 0.0008, action_spread: tensor([ 7, 43]): 0%| | 150/1000000 [01:23<121:12:31, 2.29it/s] + steps: 14, loss_val: 0.0008, action_spread: tensor([ 7, 43]): 0%| | 200/1000000 [01:24<134:32:51, 2.06it/s] + steps: 14, loss_val: 0.0007, action_spread: tensor([44, 6]): 0%| | 200/1000000 [01:51<134:32:51, 2.06it/s] @@ -869,7 +869,7 @@ Further Reading .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 1 minutes 51.942 seconds) + **Total running time of the script:** ( 1 minutes 57.686 seconds) .. _sphx_glr_download_intermediate_dqn_with_rnn_tutorial.py: diff --git a/_sources/intermediate/ensembling.rst.txt b/_sources/intermediate/ensembling.rst.txt index ceb7d65ee3..05dae1955c 100644 --- a/_sources/intermediate/ensembling.rst.txt +++ b/_sources/intermediate/ensembling.rst.txt @@ -306,13 +306,13 @@ Curious about performance numbers? Here's how the numbers look. .. code-block:: none - Predictions without vmap + Predictions without vmap [model(minibatch) for model, minibatch in zip(models, minibatches)] - 2.26 ms + 2.30 ms 1 measurement, 100 runs , 1 thread - Predictions with vmap + Predictions with vmap vmap(fmodel)(params, buffers, minibatches) - 853.78 us + 869.21 us 1 measurement, 100 runs , 1 thread @@ -332,7 +332,7 @@ on GitHub. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.876 seconds) + **Total running time of the script:** ( 0 minutes 0.903 seconds) .. _sphx_glr_download_intermediate_ensembling.py: diff --git a/_sources/intermediate/forward_ad_usage.rst.txt b/_sources/intermediate/forward_ad_usage.rst.txt index a8b76445a3..ad58f925a6 100644 --- a/_sources/intermediate/forward_ad_usage.rst.txt +++ b/_sources/intermediate/forward_ad_usage.rst.txt @@ -370,7 +370,7 @@ to the module. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.108 seconds) + **Total running time of the script:** ( 0 minutes 0.105 seconds) .. 
_sphx_glr_download_intermediate_forward_ad_usage.py: diff --git a/_sources/intermediate/fx_profiling_tutorial.rst.txt b/_sources/intermediate/fx_profiling_tutorial.rst.txt index 944bf0c8ec..2aa8bce698 100644 --- a/_sources/intermediate/fx_profiling_tutorial.rst.txt +++ b/_sources/intermediate/fx_profiling_tutorial.rst.txt @@ -477,77 +477,77 @@ characteristics of our ResNet18 model; Op type Op Average runtime (s) Pct total runtime ------------- --------------------- --------------------- ------------------- - call_module maxpool 0.00578356 8.80214 - call_module conv1 0.0048728 7.41604 - call_module layer4_0_conv2 0.00385141 5.86156 - call_module layer1_0_conv1 0.00342798 5.21713 - call_module layer1_1_conv2 0.00329804 5.01938 - call_module layer4_1_conv1 0.00325108 4.94789 - call_module layer4_1_conv2 0.00317097 4.82597 - call_module layer1_1_conv1 0.00294828 4.48707 - call_module layer2_1_conv1 0.00293827 4.47183 - call_module layer3_1_conv2 0.0027945 4.25303 - call_module layer3_0_conv2 0.00270867 4.1224 - call_module layer3_1_conv1 0.00267005 4.06362 - call_module layer1_0_conv2 0.00263691 4.01318 - call_module layer2_1_conv2 0.00256205 3.89924 - call_module layer2_0_conv2 0.00243831 3.71092 - call_module layer4_0_conv1 0.00232697 3.54147 - call_module layer3_0_conv1 0.00177383 2.69964 - call_module bn1 0.00156283 2.37852 - call_module layer2_0_conv1 0.00151324 2.30304 - call_module layer2_0_downsample_0 0.00102329 1.55737 - call_function add 0.000611544 0.930724 - call_module layer4_0_downsample_0 0.00047946 0.729702 - call_module layer3_0_downsample_0 0.000478983 0.728976 - call_function add_1 0.000436068 0.663662 - call_function add_3 0.000254154 0.386804 - call_module relu 0.000243664 0.370838 - call_module layer1_0_bn1 0.000205278 0.312418 - call_module fc 0.000187397 0.285204 - call_module layer1_1_bn2 0.000183344 0.279036 - call_module layer1_1_bn1 0.000168324 0.256176 - call_module layer1_0_bn2 0.000164747 0.250733 - call_module avgpool 0.00015974 0.243113 - call_module layer2_1_bn1 0.000144958 0.220616 - call_module layer2_0_downsample_1 0.000143528 0.218439 - call_module layer2_0_bn2 0.000140905 0.214447 - call_module layer2_0_bn1 0.000138044 0.210093 - call_module layer3_1_bn1 0.000135899 0.206827 - call_module layer3_1_bn2 0.000133991 0.203925 - call_module layer2_1_bn2 0.000133514 0.203199 - call_module layer4_0_bn2 0.000129461 0.19703 - call_module layer4_0_bn1 0.000127554 0.194128 - call_module layer3_0_bn1 0.000127316 0.193765 - call_module layer4_1_bn1 0.000126839 0.193039 - call_module layer3_0_bn2 0.00012517 0.190499 - call_module layer3_0_downsample_1 0.000124693 0.189773 - call_module layer4_1_bn2 0.000123501 0.187959 - call_module layer4_0_downsample_1 0.000121355 0.184693 - call_module layer1_0_relu 0.000119686 0.182153 - call_module layer1_0_relu_1 0.000113964 0.173445 - call_module layer1_1_relu_1 0.00010705 0.162922 - call_module layer1_1_relu 0.000105381 0.160382 - call_module layer4_1_relu 8.9407e-05 0.136071 - call_module layer4_0_relu 8.91685e-05 0.135708 - call_function add_2 8.7738e-05 0.133531 - call_module layer2_0_relu 8.70228e-05 0.132442 - call_module layer2_1_relu 8.67844e-05 0.132079 - call_module layer2_1_relu_1 8.41618e-05 0.128088 - call_module layer2_0_relu_1 8.29697e-05 0.126274 - call_module layer3_1_relu 8.10623e-05 0.123371 - call_function add_6 8.05855e-05 0.122645 - call_module layer3_0_relu 7.9155e-05 0.120468 - call_module layer3_0_relu_1 7.67708e-05 0.116839 - call_module layer4_0_relu_1 7.67708e-05 0.116839 - call_function add_5 
7.65324e-05 0.116477 - call_function add_7 7.60555e-05 0.115751 - call_module layer4_1_relu_1 7.53403e-05 0.114662 - call_function add_4 7.48634e-05 0.113937 - call_module layer3_1_relu_1 7.48634e-05 0.113937 - call_function flatten 4.62532e-05 0.0703939 - placeholder x 2.7895e-05 0.0424541 - output output 2.0504e-05 0.0312056 + call_module maxpool 0.00572634 8.98144 + call_module conv1 0.0046947 7.36338 + call_module layer4_0_conv2 0.00365067 5.72587 + call_module layer1_0_conv1 0.00333953 5.23787 + call_module layer1_1_conv1 0.0031848 4.99518 + call_module layer4_1_conv2 0.00309253 4.85046 + call_module layer4_1_conv1 0.0030818 4.83363 + call_module layer1_0_conv2 0.0029211 4.58159 + call_module layer1_1_conv2 0.00292039 4.58047 + call_module layer2_1_conv1 0.0028584 4.48324 + call_module layer3_0_conv2 0.00282025 4.42341 + call_module layer2_1_conv2 0.0026958 4.22821 + call_module layer3_1_conv1 0.00264549 4.14931 + call_module layer3_1_conv2 0.00258827 4.05956 + call_module layer2_0_conv2 0.00241852 3.79331 + call_module layer4_0_conv1 0.00218701 3.43021 + call_module layer3_0_conv1 0.00174046 2.72981 + call_module layer2_0_conv1 0.00172305 2.70251 + call_module bn1 0.00144958 2.27359 + call_function add 0.000527859 0.827917 + call_module layer2_0_downsample_0 0.000480175 0.753128 + call_module layer4_0_downsample_0 0.000460148 0.721717 + call_module layer3_0_downsample_0 0.00045228 0.709376 + call_function add_1 0.000420809 0.660015 + call_function add_3 0.000256777 0.40274 + call_module relu 0.000216722 0.339917 + call_module layer1_0_bn1 0.000186205 0.292052 + call_module fc 0.000180483 0.283077 + call_module layer1_1_bn1 0.000178099 0.279338 + call_module layer1_1_bn2 0.00015378 0.241195 + call_module layer1_0_bn2 0.000153542 0.240821 + call_module avgpool 0.000146389 0.229603 + call_module layer2_0_bn1 0.000126839 0.198939 + call_module layer2_0_bn2 0.000125408 0.196696 + call_module layer3_1_bn2 0.000124454 0.1952 + call_module layer2_1_bn1 0.000123739 0.194078 + call_module layer4_0_downsample_1 0.000123024 0.192956 + call_module layer2_1_bn2 0.000122786 0.192582 + call_module layer2_0_downsample_1 0.000121832 0.191087 + call_module layer3_1_bn1 0.000118732 0.186225 + call_module layer3_0_bn1 0.000118256 0.185477 + call_module layer3_0_downsample_1 0.000117779 0.18473 + call_module layer4_0_bn1 0.000116825 0.183234 + call_module layer4_0_bn2 0.00011611 0.182112 + call_module layer3_0_bn2 0.000115633 0.181364 + call_module layer4_1_bn1 0.000114918 0.180242 + call_module layer4_1_bn2 0.000114679 0.179868 + call_module layer1_0_relu 0.000111341 0.174633 + call_module layer1_1_relu 0.000104904 0.164536 + call_module layer1_0_relu_1 0.000102282 0.160423 + call_module layer1_1_relu_1 9.799e-05 0.153692 + call_function add_2 8.15392e-05 0.12789 + call_module layer4_0_relu 8.15392e-05 0.12789 + call_module layer4_1_relu 8.01086e-05 0.125646 + call_module layer2_0_relu 7.98702e-05 0.125272 + call_module layer2_1_relu 7.86781e-05 0.123402 + call_module layer2_1_relu_1 7.82013e-05 0.122654 + call_module layer2_0_relu_1 7.53403e-05 0.118167 + call_module layer3_0_relu 7.31945e-05 0.114802 + call_module layer3_1_relu 7.29561e-05 0.114428 + call_module layer3_0_relu_1 7.12872e-05 0.11181 + call_module layer3_1_relu_1 6.96182e-05 0.109192 + call_module layer4_1_relu_1 6.96182e-05 0.109192 + call_module layer4_0_relu_1 6.93798e-05 0.108818 + call_function add_5 6.81877e-05 0.106949 + call_function add_7 6.60419e-05 0.103583 + call_function add_4 6.4373e-05 0.100966 + call_function add_6 
6.38962e-05 0.100218 + call_function flatten 4.05312e-05 0.0635709 + placeholder x 2.6226e-05 0.0411341 + output output 1.83582e-05 0.0287939 @@ -580,7 +580,7 @@ you might have. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.440 seconds) + **Total running time of the script:** ( 0 minutes 0.415 seconds) .. _sphx_glr_download_intermediate_fx_profiling_tutorial.py: diff --git a/_sources/intermediate/jacobians_hessians.rst.txt b/_sources/intermediate/jacobians_hessians.rst.txt index 4ff8e498b5..53d9d99853 100644 --- a/_sources/intermediate/jacobians_hessians.rst.txt +++ b/_sources/intermediate/jacobians_hessians.rst.txt @@ -261,13 +261,13 @@ And then run the performance comparison: .. code-block:: none - + compute_jac(xp) - 2.45 ms + 2.62 ms 1 measurement, 500 runs , 1 thread - + jacrev(predict, argnums=2)(weight, bias, x) - 666.25 us + 726.37 us 1 measurement, 500 runs , 1 thread @@ -292,7 +292,7 @@ Let's do a relative performance comparison of the above with our ``get_perf`` fu .. code-block:: none - Performance delta: 72.7706 percent improvement with vmap + Performance delta: 72.2710 percent improvement with vmap @@ -394,13 +394,13 @@ First, let's benchmark with more inputs than outputs: .. code-block:: none torch.Size([2048, 32]) - jacfwd time: + jacfwd time: jacfwd(predict, argnums=2)(weight, bias, x) - 1.23 ms + 1.31 ms 1 measurement, 500 runs , 1 thread - jacrev time: + jacrev time: jacrev(predict, argnums=2)(weight, bias, x) - 11.72 ms + 15.12 ms 1 measurement, 500 runs , 1 thread @@ -425,7 +425,7 @@ and then do a relative benchmark: .. code-block:: none - Performance delta: 850.2911 percent improvement with jacrev + Performance delta: 1055.0398 percent improvement with jacrev @@ -462,13 +462,13 @@ and now the reverse - more outputs (M) than inputs (N): .. code-block:: none - jacfwd time: + jacfwd time: jacfwd(predict, argnums=2)(weight, bias, x) - 6.31 ms + 7.56 ms 1 measurement, 500 runs , 1 thread - jacrev time: + jacrev time: jacrev(predict, argnums=2)(weight, bias, x) - 775.26 us + 859.57 us 1 measurement, 500 runs , 1 thread @@ -493,7 +493,7 @@ and a relative performance comparison: .. code-block:: none - Performance delta: 713.7954 percent improvement with jacfwd + Performance delta: 779.8225 percent improvement with jacfwd @@ -749,7 +749,7 @@ instead compose reverse-mode AD with reverse-mode AD: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 12.738 seconds) + **Total running time of the script:** ( 0 minutes 15.583 seconds) .. _sphx_glr_download_intermediate_jacobians_hessians.py: diff --git a/_sources/intermediate/mario_rl_tutorial.rst.txt b/_sources/intermediate/mario_rl_tutorial.rst.txt index 2fd81728e7..cd3f41853d 100644 --- a/_sources/intermediate/mario_rl_tutorial.rst.txt +++ b/_sources/intermediate/mario_rl_tutorial.rst.txt @@ -993,9 +993,9 @@ his world, we suggest running the loop for at least 40,000 episodes! 
Using CUDA: True - Episode 0 - Step 163 - Epsilon 0.9999592508251706 - Mean Reward 635.0 - Mean Length 163.0 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 1.992 - Time 2024-07-10T20:26:32 - Episode 20 - Step 5007 - Epsilon 0.9987490329557962 - Mean Reward 667.429 - Mean Length 238.429 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 59.568 - Time 2024-07-10T20:27:32 - Episode 39 - Step 8854 - Epsilon 0.9977889477081997 - Mean Reward 656.6 - Mean Length 221.35 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 48.049 - Time 2024-07-10T20:28:20 + Episode 0 - Step 163 - Epsilon 0.9999592508251706 - Mean Reward 635.0 - Mean Length 163.0 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 2.039 - Time 2024-07-15T17:06:48 + Episode 20 - Step 5007 - Epsilon 0.9987490329557962 - Mean Reward 667.429 - Mean Length 238.429 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 60.689 - Time 2024-07-15T17:07:48 + Episode 39 - Step 8854 - Epsilon 0.9977889477081997 - Mean Reward 656.6 - Mean Length 221.35 - Mean Loss 0.0 - Mean Q Value 0.0 - Time Delta 48.662 - Time 2024-07-15T17:08:37 @@ -1012,7 +1012,7 @@ to train an AI to play any of the games at the `OpenAI gym + Per-sample-grads without vmap compute_sample_grads(data, targets) - 106.24 ms + 112.26 ms 1 measurement, 100 runs , 1 thread - Per-sample-grads with vmap + Per-sample-grads with vmap ft_compute_sample_grad(params, buffers, data, targets) - 8.81 ms + 8.97 ms 1 measurement, 100 runs , 1 thread - Performance delta: 1105.3591 percent improvement with vmap + Performance delta: 1151.0071 percent improvement with vmap @@ -419,7 +419,7 @@ at on GitHub. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 12.598 seconds) + **Total running time of the script:** ( 0 minutes 13.400 seconds) .. _sphx_glr_download_intermediate_per_sample_grads.py: diff --git a/_sources/intermediate/pruning_tutorial.rst.txt b/_sources/intermediate/pruning_tutorial.rst.txt index a65679106c..35775de69a 100644 --- a/_sources/intermediate/pruning_tutorial.rst.txt +++ b/_sources/intermediate/pruning_tutorial.rst.txt @@ -459,7 +459,7 @@ present. .. code-block:: none - OrderedDict([(0, )]) + OrderedDict([(0, )]) @@ -652,7 +652,7 @@ module attributes, and the module will now have two ``forward_pre_hooks``. .. code-block:: none - OrderedDict([(0, ), (1, )]) + OrderedDict([(0, ), (1, )]) @@ -762,7 +762,7 @@ pruning applied to the ``weight`` parameter. .. code-block:: none - [, ] + [, ] @@ -1359,7 +1359,7 @@ Let's try it out! .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.461 seconds) + **Total running time of the script:** ( 0 minutes 0.474 seconds) .. _sphx_glr_download_intermediate_pruning_tutorial.py: diff --git a/_sources/intermediate/reinforcement_ppo.rst.txt b/_sources/intermediate/reinforcement_ppo.rst.txt index 45b69b0410..c6cd26cc2d 100644 --- a/_sources/intermediate/reinforcement_ppo.rst.txt +++ b/_sources/intermediate/reinforcement_ppo.rst.txt @@ -1046,106 +1046,106 @@ The steps include: 0%| | 0/50000 [00:00 + @@ -124,7 +124,7 @@ network. 
Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ./MNIST/raw/train-images-idx3-ubyte.gz 0%| | 0/9912422 [00:00 ([l__self___features_pool0], 1) {} + call_function concated_features ([l__self___features_pool0], 1) {} call_module l__self___features_denseblock1_denselayer1_norm1 L__self___features_denseblock1_denselayer1_norm1 (concated_features,) {} call_module l__self___features_denseblock1_denselayer1_relu1 L__self___features_denseblock1_denselayer1_relu1 (l__self___features_denseblock1_denselayer1_norm1,) {} call_module bottleneck_output L__self___features_denseblock1_denselayer1_conv1 (l__self___features_denseblock1_denselayer1_relu1,) {} call_module l__self___features_denseblock1_denselayer1_norm2 L__self___features_denseblock1_denselayer1_norm2 (bottleneck_output,) {} call_module l__self___features_denseblock1_denselayer1_relu2 L__self___features_denseblock1_denselayer1_relu2 (l__self___features_denseblock1_denselayer1_norm2,) {} call_module new_features L__self___features_denseblock1_denselayer1_conv2 (l__self___features_denseblock1_denselayer1_relu2,) {} - call_function concated_features_1 ([l__self___features_pool0, new_features], 1) {} + call_function concated_features_1 ([l__self___features_pool0, new_features], 1) {} call_module l__self___features_denseblock1_denselayer2_norm1 L__self___features_denseblock1_denselayer2_norm1 (concated_features_1,) {} call_module l__self___features_denseblock1_denselayer2_relu1 L__self___features_denseblock1_denselayer2_relu1 (l__self___features_denseblock1_denselayer2_norm1,) {} call_module bottleneck_output_2 L__self___features_denseblock1_denselayer2_conv1 (l__self___features_denseblock1_denselayer2_relu1,) {} call_module l__self___features_denseblock1_denselayer2_norm2 L__self___features_denseblock1_denselayer2_norm2 (bottleneck_output_2,) {} call_module l__self___features_denseblock1_denselayer2_relu2 L__self___features_denseblock1_denselayer2_relu2 (l__self___features_denseblock1_denselayer2_norm2,) {} call_module new_features_2 L__self___features_denseblock1_denselayer2_conv2 (l__self___features_denseblock1_denselayer2_relu2,) {} - call_function concated_features_2 ([l__self___features_pool0, new_features, new_features_2], 1) {} + call_function concated_features_2 ([l__self___features_pool0, new_features, new_features_2], 1) {} call_module l__self___features_denseblock1_denselayer3_norm1 L__self___features_denseblock1_denselayer3_norm1 (concated_features_2,) {} call_module l__self___features_denseblock1_denselayer3_relu1 L__self___features_denseblock1_denselayer3_relu1 (l__self___features_denseblock1_denselayer3_norm1,) {} call_module bottleneck_output_4 L__self___features_denseblock1_denselayer3_conv1 (l__self___features_denseblock1_denselayer3_relu1,) {} call_module l__self___features_denseblock1_denselayer3_norm2 L__self___features_denseblock1_denselayer3_norm2 (bottleneck_output_4,) {} call_module l__self___features_denseblock1_denselayer3_relu2 L__self___features_denseblock1_denselayer3_relu2 (l__self___features_denseblock1_denselayer3_norm2,) {} call_module new_features_4 L__self___features_denseblock1_denselayer3_conv2 (l__self___features_denseblock1_denselayer3_relu2,) {} - call_function concated_features_3 ([l__self___features_pool0, new_features, new_features_2, new_features_4], 1) {} + call_function concated_features_3 ([l__self___features_pool0, new_features, new_features_2, new_features_4], 1) {} call_module l__self___features_denseblock1_denselayer4_norm1 
L__self___features_denseblock1_denselayer4_norm1 (concated_features_3,) {} call_module l__self___features_denseblock1_denselayer4_relu1 L__self___features_denseblock1_denselayer4_relu1 (l__self___features_denseblock1_denselayer4_norm1,) {} call_module bottleneck_output_6 L__self___features_denseblock1_denselayer4_conv1 (l__self___features_denseblock1_denselayer4_relu1,) {} call_module l__self___features_denseblock1_denselayer4_norm2 L__self___features_denseblock1_denselayer4_norm2 (bottleneck_output_6,) {} call_module l__self___features_denseblock1_denselayer4_relu2 L__self___features_denseblock1_denselayer4_relu2 (l__self___features_denseblock1_denselayer4_norm2,) {} call_module new_features_6 L__self___features_denseblock1_denselayer4_conv2 (l__self___features_denseblock1_denselayer4_relu2,) {} - call_function concated_features_4 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6], 1) {} + call_function concated_features_4 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6], 1) {} call_module l__self___features_denseblock1_denselayer5_norm1 L__self___features_denseblock1_denselayer5_norm1 (concated_features_4,) {} call_module l__self___features_denseblock1_denselayer5_relu1 L__self___features_denseblock1_denselayer5_relu1 (l__self___features_denseblock1_denselayer5_norm1,) {} call_module bottleneck_output_8 L__self___features_denseblock1_denselayer5_conv1 (l__self___features_denseblock1_denselayer5_relu1,) {} call_module l__self___features_denseblock1_denselayer5_norm2 L__self___features_denseblock1_denselayer5_norm2 (bottleneck_output_8,) {} call_module l__self___features_denseblock1_denselayer5_relu2 L__self___features_denseblock1_denselayer5_relu2 (l__self___features_denseblock1_denselayer5_norm2,) {} call_module new_features_8 L__self___features_denseblock1_denselayer5_conv2 (l__self___features_denseblock1_denselayer5_relu2,) {} - call_function concated_features_5 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6, new_features_8], 1) {} + call_function concated_features_5 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6, new_features_8], 1) {} call_module l__self___features_denseblock1_denselayer6_norm1 L__self___features_denseblock1_denselayer6_norm1 (concated_features_5,) {} call_module l__self___features_denseblock1_denselayer6_relu1 L__self___features_denseblock1_denselayer6_relu1 (l__self___features_denseblock1_denselayer6_norm1,) {} call_module bottleneck_output_10 L__self___features_denseblock1_denselayer6_conv1 (l__self___features_denseblock1_denselayer6_relu1,) {} call_module l__self___features_denseblock1_denselayer6_norm2 L__self___features_denseblock1_denselayer6_norm2 (bottleneck_output_10,) {} call_module l__self___features_denseblock1_denselayer6_relu2 L__self___features_denseblock1_denselayer6_relu2 (l__self___features_denseblock1_denselayer6_norm2,) {} call_module new_features_10 L__self___features_denseblock1_denselayer6_conv2 (l__self___features_denseblock1_denselayer6_relu2,) {} - call_function cat_6 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6, new_features_8, new_features_10], 1) {} + call_function cat_6 ([l__self___features_pool0, new_features, new_features_2, new_features_4, new_features_6, new_features_8, new_features_10], 1) {} call_module l__self___features_transition1_norm L__self___features_transition1_norm (cat_6,) {} call_module 
l__self___features_transition1_relu L__self___features_transition1_relu (l__self___features_transition1_norm,) {} call_module l__self___features_transition1_conv L__self___features_transition1_conv (l__self___features_transition1_relu,) {} call_module l__self___features_transition1_pool L__self___features_transition1_pool (l__self___features_transition1_conv,) {} - call_function concated_features_6 ([l__self___features_transition1_pool], 1) {} + call_function concated_features_6 ([l__self___features_transition1_pool], 1) {} call_module l__self___features_denseblock2_denselayer1_norm1 L__self___features_denseblock2_denselayer1_norm1 (concated_features_6,) {} call_module l__self___features_denseblock2_denselayer1_relu1 L__self___features_denseblock2_denselayer1_relu1 (l__self___features_denseblock2_denselayer1_norm1,) {} call_module bottleneck_output_12 L__self___features_denseblock2_denselayer1_conv1 (l__self___features_denseblock2_denselayer1_relu1,) {} call_module l__self___features_denseblock2_denselayer1_norm2 L__self___features_denseblock2_denselayer1_norm2 (bottleneck_output_12,) {} call_module l__self___features_denseblock2_denselayer1_relu2 L__self___features_denseblock2_denselayer1_relu2 (l__self___features_denseblock2_denselayer1_norm2,) {} call_module new_features_12 L__self___features_denseblock2_denselayer1_conv2 (l__self___features_denseblock2_denselayer1_relu2,) {} - call_function concated_features_7 ([l__self___features_transition1_pool, new_features_12], 1) {} + call_function concated_features_7 ([l__self___features_transition1_pool, new_features_12], 1) {} call_module l__self___features_denseblock2_denselayer2_norm1 L__self___features_denseblock2_denselayer2_norm1 (concated_features_7,) {} call_module l__self___features_denseblock2_denselayer2_relu1 L__self___features_denseblock2_denselayer2_relu1 (l__self___features_denseblock2_denselayer2_norm1,) {} call_module bottleneck_output_14 L__self___features_denseblock2_denselayer2_conv1 (l__self___features_denseblock2_denselayer2_relu1,) {} call_module l__self___features_denseblock2_denselayer2_norm2 L__self___features_denseblock2_denselayer2_norm2 (bottleneck_output_14,) {} call_module l__self___features_denseblock2_denselayer2_relu2 L__self___features_denseblock2_denselayer2_relu2 (l__self___features_denseblock2_denselayer2_norm2,) {} call_module new_features_14 L__self___features_denseblock2_denselayer2_conv2 (l__self___features_denseblock2_denselayer2_relu2,) {} - call_function concated_features_8 ([l__self___features_transition1_pool, new_features_12, new_features_14], 1) {} + call_function concated_features_8 ([l__self___features_transition1_pool, new_features_12, new_features_14], 1) {} call_module l__self___features_denseblock2_denselayer3_norm1 L__self___features_denseblock2_denselayer3_norm1 (concated_features_8,) {} call_module l__self___features_denseblock2_denselayer3_relu1 L__self___features_denseblock2_denselayer3_relu1 (l__self___features_denseblock2_denselayer3_norm1,) {} call_module bottleneck_output_16 L__self___features_denseblock2_denselayer3_conv1 (l__self___features_denseblock2_denselayer3_relu1,) {} call_module l__self___features_denseblock2_denselayer3_norm2 L__self___features_denseblock2_denselayer3_norm2 (bottleneck_output_16,) {} call_module l__self___features_denseblock2_denselayer3_relu2 L__self___features_denseblock2_denselayer3_relu2 (l__self___features_denseblock2_denselayer3_norm2,) {} call_module new_features_16 L__self___features_denseblock2_denselayer3_conv2 
(l__self___features_denseblock2_denselayer3_relu2,) {} - call_function concated_features_9 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16], 1) {} + call_function concated_features_9 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16], 1) {} call_module l__self___features_denseblock2_denselayer4_norm1 L__self___features_denseblock2_denselayer4_norm1 (concated_features_9,) {} call_module l__self___features_denseblock2_denselayer4_relu1 L__self___features_denseblock2_denselayer4_relu1 (l__self___features_denseblock2_denselayer4_norm1,) {} call_module bottleneck_output_18 L__self___features_denseblock2_denselayer4_conv1 (l__self___features_denseblock2_denselayer4_relu1,) {} call_module l__self___features_denseblock2_denselayer4_norm2 L__self___features_denseblock2_denselayer4_norm2 (bottleneck_output_18,) {} call_module l__self___features_denseblock2_denselayer4_relu2 L__self___features_denseblock2_denselayer4_relu2 (l__self___features_denseblock2_denselayer4_norm2,) {} call_module new_features_18 L__self___features_denseblock2_denselayer4_conv2 (l__self___features_denseblock2_denselayer4_relu2,) {} - call_function concated_features_10 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18], 1) {} + call_function concated_features_10 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18], 1) {} call_module l__self___features_denseblock2_denselayer5_norm1 L__self___features_denseblock2_denselayer5_norm1 (concated_features_10,) {} call_module l__self___features_denseblock2_denselayer5_relu1 L__self___features_denseblock2_denselayer5_relu1 (l__self___features_denseblock2_denselayer5_norm1,) {} call_module bottleneck_output_20 L__self___features_denseblock2_denselayer5_conv1 (l__self___features_denseblock2_denselayer5_relu1,) {} call_module l__self___features_denseblock2_denselayer5_norm2 L__self___features_denseblock2_denselayer5_norm2 (bottleneck_output_20,) {} call_module l__self___features_denseblock2_denselayer5_relu2 L__self___features_denseblock2_denselayer5_relu2 (l__self___features_denseblock2_denselayer5_norm2,) {} call_module new_features_20 L__self___features_denseblock2_denselayer5_conv2 (l__self___features_denseblock2_denselayer5_relu2,) {} - call_function concated_features_11 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20], 1) {} + call_function concated_features_11 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20], 1) {} call_module l__self___features_denseblock2_denselayer6_norm1 L__self___features_denseblock2_denselayer6_norm1 (concated_features_11,) {} call_module l__self___features_denseblock2_denselayer6_relu1 L__self___features_denseblock2_denselayer6_relu1 (l__self___features_denseblock2_denselayer6_norm1,) {} call_module bottleneck_output_22 L__self___features_denseblock2_denselayer6_conv1 (l__self___features_denseblock2_denselayer6_relu1,) {} call_module l__self___features_denseblock2_denselayer6_norm2 L__self___features_denseblock2_denselayer6_norm2 (bottleneck_output_22,) {} call_module l__self___features_denseblock2_denselayer6_relu2 L__self___features_denseblock2_denselayer6_relu2 (l__self___features_denseblock2_denselayer6_norm2,) {} call_module new_features_22 L__self___features_denseblock2_denselayer6_conv2 
(l__self___features_denseblock2_denselayer6_relu2,) {} - call_function concated_features_12 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22], 1) {} + call_function concated_features_12 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22], 1) {} call_module l__self___features_denseblock2_denselayer7_norm1 L__self___features_denseblock2_denselayer7_norm1 (concated_features_12,) {} call_module l__self___features_denseblock2_denselayer7_relu1 L__self___features_denseblock2_denselayer7_relu1 (l__self___features_denseblock2_denselayer7_norm1,) {} call_module bottleneck_output_24 L__self___features_denseblock2_denselayer7_conv1 (l__self___features_denseblock2_denselayer7_relu1,) {} call_module l__self___features_denseblock2_denselayer7_norm2 L__self___features_denseblock2_denselayer7_norm2 (bottleneck_output_24,) {} call_module l__self___features_denseblock2_denselayer7_relu2 L__self___features_denseblock2_denselayer7_relu2 (l__self___features_denseblock2_denselayer7_norm2,) {} call_module new_features_24 L__self___features_denseblock2_denselayer7_conv2 (l__self___features_denseblock2_denselayer7_relu2,) {} - call_function concated_features_13 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24], 1) {} + call_function concated_features_13 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24], 1) {} call_module l__self___features_denseblock2_denselayer8_norm1 L__self___features_denseblock2_denselayer8_norm1 (concated_features_13,) {} call_module l__self___features_denseblock2_denselayer8_relu1 L__self___features_denseblock2_denselayer8_relu1 (l__self___features_denseblock2_denselayer8_norm1,) {} call_module bottleneck_output_26 L__self___features_denseblock2_denselayer8_conv1 (l__self___features_denseblock2_denselayer8_relu1,) {} call_module l__self___features_denseblock2_denselayer8_norm2 L__self___features_denseblock2_denselayer8_norm2 (bottleneck_output_26,) {} call_module l__self___features_denseblock2_denselayer8_relu2 L__self___features_denseblock2_denselayer8_relu2 (l__self___features_denseblock2_denselayer8_norm2,) {} call_module new_features_26 L__self___features_denseblock2_denselayer8_conv2 (l__self___features_denseblock2_denselayer8_relu2,) {} - call_function concated_features_14 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26], 1) {} + call_function concated_features_14 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26], 1) {} call_module l__self___features_denseblock2_denselayer9_norm1 L__self___features_denseblock2_denselayer9_norm1 (concated_features_14,) {} call_module l__self___features_denseblock2_denselayer9_relu1 L__self___features_denseblock2_denselayer9_relu1 (l__self___features_denseblock2_denselayer9_norm1,) {} call_module bottleneck_output_28 L__self___features_denseblock2_denselayer9_conv1 (l__self___features_denseblock2_denselayer9_relu1,) {} call_module l__self___features_denseblock2_denselayer9_norm2 
L__self___features_denseblock2_denselayer9_norm2 (bottleneck_output_28,) {} call_module l__self___features_denseblock2_denselayer9_relu2 L__self___features_denseblock2_denselayer9_relu2 (l__self___features_denseblock2_denselayer9_norm2,) {} call_module new_features_28 L__self___features_denseblock2_denselayer9_conv2 (l__self___features_denseblock2_denselayer9_relu2,) {} - call_function concated_features_15 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28], 1) {} + call_function concated_features_15 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28], 1) {} call_module l__self___features_denseblock2_denselayer10_norm1 L__self___features_denseblock2_denselayer10_norm1 (concated_features_15,) {} call_module l__self___features_denseblock2_denselayer10_relu1 L__self___features_denseblock2_denselayer10_relu1 (l__self___features_denseblock2_denselayer10_norm1,) {} call_module bottleneck_output_30 L__self___features_denseblock2_denselayer10_conv1 (l__self___features_denseblock2_denselayer10_relu1,) {} call_module l__self___features_denseblock2_denselayer10_norm2 L__self___features_denseblock2_denselayer10_norm2 (bottleneck_output_30,) {} call_module l__self___features_denseblock2_denselayer10_relu2 L__self___features_denseblock2_denselayer10_relu2 (l__self___features_denseblock2_denselayer10_norm2,) {} call_module new_features_30 L__self___features_denseblock2_denselayer10_conv2 (l__self___features_denseblock2_denselayer10_relu2,) {} - call_function concated_features_16 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30], 1) {} + call_function concated_features_16 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30], 1) {} call_module l__self___features_denseblock2_denselayer11_norm1 L__self___features_denseblock2_denselayer11_norm1 (concated_features_16,) {} call_module l__self___features_denseblock2_denselayer11_relu1 L__self___features_denseblock2_denselayer11_relu1 (l__self___features_denseblock2_denselayer11_norm1,) {} call_module bottleneck_output_32 L__self___features_denseblock2_denselayer11_conv1 (l__self___features_denseblock2_denselayer11_relu1,) {} call_module l__self___features_denseblock2_denselayer11_norm2 L__self___features_denseblock2_denselayer11_norm2 (bottleneck_output_32,) {} call_module l__self___features_denseblock2_denselayer11_relu2 L__self___features_denseblock2_denselayer11_relu2 (l__self___features_denseblock2_denselayer11_norm2,) {} call_module new_features_32 L__self___features_denseblock2_denselayer11_conv2 (l__self___features_denseblock2_denselayer11_relu2,) {} - call_function concated_features_17 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30, new_features_32], 1) {} + call_function concated_features_17 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, 
new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30, new_features_32], 1) {} call_module l__self___features_denseblock2_denselayer12_norm1 L__self___features_denseblock2_denselayer12_norm1 (concated_features_17,) {} call_module l__self___features_denseblock2_denselayer12_relu1 L__self___features_denseblock2_denselayer12_relu1 (l__self___features_denseblock2_denselayer12_norm1,) {} call_module bottleneck_output_34 L__self___features_denseblock2_denselayer12_conv1 (l__self___features_denseblock2_denselayer12_relu1,) {} call_module l__self___features_denseblock2_denselayer12_norm2 L__self___features_denseblock2_denselayer12_norm2 (bottleneck_output_34,) {} call_module l__self___features_denseblock2_denselayer12_relu2 L__self___features_denseblock2_denselayer12_relu2 (l__self___features_denseblock2_denselayer12_norm2,) {} call_module new_features_34 L__self___features_denseblock2_denselayer12_conv2 (l__self___features_denseblock2_denselayer12_relu2,) {} - call_function cat_19 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30, new_features_32, new_features_34], 1) {} + call_function cat_19 ([l__self___features_transition1_pool, new_features_12, new_features_14, new_features_16, new_features_18, new_features_20, new_features_22, new_features_24, new_features_26, new_features_28, new_features_30, new_features_32, new_features_34], 1) {} call_module l__self___features_transition2_norm L__self___features_transition2_norm (cat_19,) {} call_module l__self___features_transition2_relu L__self___features_transition2_relu (l__self___features_transition2_norm,) {} call_module l__self___features_transition2_conv L__self___features_transition2_conv (l__self___features_transition2_relu,) {} call_module l__self___features_transition2_pool L__self___features_transition2_pool (l__self___features_transition2_conv,) {} - call_function concated_features_18 ([l__self___features_transition2_pool], 1) {} + call_function concated_features_18 ([l__self___features_transition2_pool], 1) {} call_module l__self___features_denseblock3_denselayer1_norm1 L__self___features_denseblock3_denselayer1_norm1 (concated_features_18,) {} call_module l__self___features_denseblock3_denselayer1_relu1 L__self___features_denseblock3_denselayer1_relu1 (l__self___features_denseblock3_denselayer1_norm1,) {} call_module bottleneck_output_36 L__self___features_denseblock3_denselayer1_conv1 (l__self___features_denseblock3_denselayer1_relu1,) {} call_module l__self___features_denseblock3_denselayer1_norm2 L__self___features_denseblock3_denselayer1_norm2 (bottleneck_output_36,) {} call_module l__self___features_denseblock3_denselayer1_relu2 L__self___features_denseblock3_denselayer1_relu2 (l__self___features_denseblock3_denselayer1_norm2,) {} call_module new_features_36 L__self___features_denseblock3_denselayer1_conv2 (l__self___features_denseblock3_denselayer1_relu2,) {} - call_function concated_features_19 ([l__self___features_transition2_pool, new_features_36], 1) {} + call_function concated_features_19 ([l__self___features_transition2_pool, new_features_36], 1) {} call_module l__self___features_denseblock3_denselayer2_norm1 L__self___features_denseblock3_denselayer2_norm1 (concated_features_19,) {} call_module l__self___features_denseblock3_denselayer2_relu1 L__self___features_denseblock3_denselayer2_relu1 
(l__self___features_denseblock3_denselayer2_norm1,) {} call_module bottleneck_output_38 L__self___features_denseblock3_denselayer2_conv1 (l__self___features_denseblock3_denselayer2_relu1,) {} call_module l__self___features_denseblock3_denselayer2_norm2 L__self___features_denseblock3_denselayer2_norm2 (bottleneck_output_38,) {} call_module l__self___features_denseblock3_denselayer2_relu2 L__self___features_denseblock3_denselayer2_relu2 (l__self___features_denseblock3_denselayer2_norm2,) {} call_module new_features_38 L__self___features_denseblock3_denselayer2_conv2 (l__self___features_denseblock3_denselayer2_relu2,) {} - call_function concated_features_20 ([l__self___features_transition2_pool, new_features_36, new_features_38], 1) {} + call_function concated_features_20 ([l__self___features_transition2_pool, new_features_36, new_features_38], 1) {} call_module l__self___features_denseblock3_denselayer3_norm1 L__self___features_denseblock3_denselayer3_norm1 (concated_features_20,) {} call_module l__self___features_denseblock3_denselayer3_relu1 L__self___features_denseblock3_denselayer3_relu1 (l__self___features_denseblock3_denselayer3_norm1,) {} call_module bottleneck_output_40 L__self___features_denseblock3_denselayer3_conv1 (l__self___features_denseblock3_denselayer3_relu1,) {} call_module l__self___features_denseblock3_denselayer3_norm2 L__self___features_denseblock3_denselayer3_norm2 (bottleneck_output_40,) {} call_module l__self___features_denseblock3_denselayer3_relu2 L__self___features_denseblock3_denselayer3_relu2 (l__self___features_denseblock3_denselayer3_norm2,) {} call_module new_features_40 L__self___features_denseblock3_denselayer3_conv2 (l__self___features_denseblock3_denselayer3_relu2,) {} - call_function concated_features_21 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40], 1) {} + call_function concated_features_21 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40], 1) {} call_module l__self___features_denseblock3_denselayer4_norm1 L__self___features_denseblock3_denselayer4_norm1 (concated_features_21,) {} call_module l__self___features_denseblock3_denselayer4_relu1 L__self___features_denseblock3_denselayer4_relu1 (l__self___features_denseblock3_denselayer4_norm1,) {} call_module bottleneck_output_42 L__self___features_denseblock3_denselayer4_conv1 (l__self___features_denseblock3_denselayer4_relu1,) {} call_module l__self___features_denseblock3_denselayer4_norm2 L__self___features_denseblock3_denselayer4_norm2 (bottleneck_output_42,) {} call_module l__self___features_denseblock3_denselayer4_relu2 L__self___features_denseblock3_denselayer4_relu2 (l__self___features_denseblock3_denselayer4_norm2,) {} call_module new_features_42 L__self___features_denseblock3_denselayer4_conv2 (l__self___features_denseblock3_denselayer4_relu2,) {} - call_function concated_features_22 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42], 1) {} + call_function concated_features_22 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42], 1) {} call_module l__self___features_denseblock3_denselayer5_norm1 L__self___features_denseblock3_denselayer5_norm1 (concated_features_22,) {} call_module l__self___features_denseblock3_denselayer5_relu1 L__self___features_denseblock3_denselayer5_relu1 (l__self___features_denseblock3_denselayer5_norm1,) {} call_module bottleneck_output_44 
L__self___features_denseblock3_denselayer5_conv1 (l__self___features_denseblock3_denselayer5_relu1,) {} call_module l__self___features_denseblock3_denselayer5_norm2 L__self___features_denseblock3_denselayer5_norm2 (bottleneck_output_44,) {} call_module l__self___features_denseblock3_denselayer5_relu2 L__self___features_denseblock3_denselayer5_relu2 (l__self___features_denseblock3_denselayer5_norm2,) {} call_module new_features_44 L__self___features_denseblock3_denselayer5_conv2 (l__self___features_denseblock3_denselayer5_relu2,) {} - call_function concated_features_23 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44], 1) {} + call_function concated_features_23 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44], 1) {} call_module l__self___features_denseblock3_denselayer6_norm1 L__self___features_denseblock3_denselayer6_norm1 (concated_features_23,) {} call_module l__self___features_denseblock3_denselayer6_relu1 L__self___features_denseblock3_denselayer6_relu1 (l__self___features_denseblock3_denselayer6_norm1,) {} call_module bottleneck_output_46 L__self___features_denseblock3_denselayer6_conv1 (l__self___features_denseblock3_denselayer6_relu1,) {} call_module l__self___features_denseblock3_denselayer6_norm2 L__self___features_denseblock3_denselayer6_norm2 (bottleneck_output_46,) {} call_module l__self___features_denseblock3_denselayer6_relu2 L__self___features_denseblock3_denselayer6_relu2 (l__self___features_denseblock3_denselayer6_norm2,) {} call_module new_features_46 L__self___features_denseblock3_denselayer6_conv2 (l__self___features_denseblock3_denselayer6_relu2,) {} - call_function concated_features_24 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46], 1) {} + call_function concated_features_24 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46], 1) {} call_module l__self___features_denseblock3_denselayer7_norm1 L__self___features_denseblock3_denselayer7_norm1 (concated_features_24,) {} call_module l__self___features_denseblock3_denselayer7_relu1 L__self___features_denseblock3_denselayer7_relu1 (l__self___features_denseblock3_denselayer7_norm1,) {} call_module bottleneck_output_48 L__self___features_denseblock3_denselayer7_conv1 (l__self___features_denseblock3_denselayer7_relu1,) {} call_module l__self___features_denseblock3_denselayer7_norm2 L__self___features_denseblock3_denselayer7_norm2 (bottleneck_output_48,) {} call_module l__self___features_denseblock3_denselayer7_relu2 L__self___features_denseblock3_denselayer7_relu2 (l__self___features_denseblock3_denselayer7_norm2,) {} call_module new_features_48 L__self___features_denseblock3_denselayer7_conv2 (l__self___features_denseblock3_denselayer7_relu2,) {} - call_function concated_features_25 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48], 1) {} + call_function concated_features_25 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48], 1) {} call_module l__self___features_denseblock3_denselayer8_norm1 L__self___features_denseblock3_denselayer8_norm1 (concated_features_25,) {} call_module 
l__self___features_denseblock3_denselayer8_relu1 L__self___features_denseblock3_denselayer8_relu1 (l__self___features_denseblock3_denselayer8_norm1,) {} call_module bottleneck_output_50 L__self___features_denseblock3_denselayer8_conv1 (l__self___features_denseblock3_denselayer8_relu1,) {} call_module l__self___features_denseblock3_denselayer8_norm2 L__self___features_denseblock3_denselayer8_norm2 (bottleneck_output_50,) {} call_module l__self___features_denseblock3_denselayer8_relu2 L__self___features_denseblock3_denselayer8_relu2 (l__self___features_denseblock3_denselayer8_norm2,) {} call_module new_features_50 L__self___features_denseblock3_denselayer8_conv2 (l__self___features_denseblock3_denselayer8_relu2,) {} - call_function concated_features_26 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50], 1) {} + call_function concated_features_26 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50], 1) {} call_module l__self___features_denseblock3_denselayer9_norm1 L__self___features_denseblock3_denselayer9_norm1 (concated_features_26,) {} call_module l__self___features_denseblock3_denselayer9_relu1 L__self___features_denseblock3_denselayer9_relu1 (l__self___features_denseblock3_denselayer9_norm1,) {} call_module bottleneck_output_52 L__self___features_denseblock3_denselayer9_conv1 (l__self___features_denseblock3_denselayer9_relu1,) {} call_module l__self___features_denseblock3_denselayer9_norm2 L__self___features_denseblock3_denselayer9_norm2 (bottleneck_output_52,) {} call_module l__self___features_denseblock3_denselayer9_relu2 L__self___features_denseblock3_denselayer9_relu2 (l__self___features_denseblock3_denselayer9_norm2,) {} call_module new_features_52 L__self___features_denseblock3_denselayer9_conv2 (l__self___features_denseblock3_denselayer9_relu2,) {} - call_function concated_features_27 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52], 1) {} + call_function concated_features_27 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52], 1) {} call_module l__self___features_denseblock3_denselayer10_norm1 L__self___features_denseblock3_denselayer10_norm1 (concated_features_27,) {} call_module l__self___features_denseblock3_denselayer10_relu1 L__self___features_denseblock3_denselayer10_relu1 (l__self___features_denseblock3_denselayer10_norm1,) {} call_module bottleneck_output_54 L__self___features_denseblock3_denselayer10_conv1 (l__self___features_denseblock3_denselayer10_relu1,) {} call_module l__self___features_denseblock3_denselayer10_norm2 L__self___features_denseblock3_denselayer10_norm2 (bottleneck_output_54,) {} call_module l__self___features_denseblock3_denselayer10_relu2 L__self___features_denseblock3_denselayer10_relu2 (l__self___features_denseblock3_denselayer10_norm2,) {} call_module new_features_54 L__self___features_denseblock3_denselayer10_conv2 (l__self___features_denseblock3_denselayer10_relu2,) {} - call_function concated_features_28 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, 
new_features_46, new_features_48, new_features_50, new_features_52, new_features_54], 1) {} + call_function concated_features_28 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54], 1) {} call_module l__self___features_denseblock3_denselayer11_norm1 L__self___features_denseblock3_denselayer11_norm1 (concated_features_28,) {} call_module l__self___features_denseblock3_denselayer11_relu1 L__self___features_denseblock3_denselayer11_relu1 (l__self___features_denseblock3_denselayer11_norm1,) {} call_module bottleneck_output_56 L__self___features_denseblock3_denselayer11_conv1 (l__self___features_denseblock3_denselayer11_relu1,) {} call_module l__self___features_denseblock3_denselayer11_norm2 L__self___features_denseblock3_denselayer11_norm2 (bottleneck_output_56,) {} call_module l__self___features_denseblock3_denselayer11_relu2 L__self___features_denseblock3_denselayer11_relu2 (l__self___features_denseblock3_denselayer11_norm2,) {} call_module new_features_56 L__self___features_denseblock3_denselayer11_conv2 (l__self___features_denseblock3_denselayer11_relu2,) {} - call_function concated_features_29 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56], 1) {} + call_function concated_features_29 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56], 1) {} call_module l__self___features_denseblock3_denselayer12_norm1 L__self___features_denseblock3_denselayer12_norm1 (concated_features_29,) {} call_module l__self___features_denseblock3_denselayer12_relu1 L__self___features_denseblock3_denselayer12_relu1 (l__self___features_denseblock3_denselayer12_norm1,) {} call_module bottleneck_output_58 L__self___features_denseblock3_denselayer12_conv1 (l__self___features_denseblock3_denselayer12_relu1,) {} call_module l__self___features_denseblock3_denselayer12_norm2 L__self___features_denseblock3_denselayer12_norm2 (bottleneck_output_58,) {} call_module l__self___features_denseblock3_denselayer12_relu2 L__self___features_denseblock3_denselayer12_relu2 (l__self___features_denseblock3_denselayer12_norm2,) {} call_module new_features_58 L__self___features_denseblock3_denselayer12_conv2 (l__self___features_denseblock3_denselayer12_relu2,) {} - call_function concated_features_30 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58], 1) {} + call_function concated_features_30 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58], 1) {} call_module l__self___features_denseblock3_denselayer13_norm1 L__self___features_denseblock3_denselayer13_norm1 (concated_features_30,) {} call_module l__self___features_denseblock3_denselayer13_relu1 L__self___features_denseblock3_denselayer13_relu1 (l__self___features_denseblock3_denselayer13_norm1,) {} call_module 
bottleneck_output_60 L__self___features_denseblock3_denselayer13_conv1 (l__self___features_denseblock3_denselayer13_relu1,) {} call_module l__self___features_denseblock3_denselayer13_norm2 L__self___features_denseblock3_denselayer13_norm2 (bottleneck_output_60,) {} call_module l__self___features_denseblock3_denselayer13_relu2 L__self___features_denseblock3_denselayer13_relu2 (l__self___features_denseblock3_denselayer13_norm2,) {} call_module new_features_60 L__self___features_denseblock3_denselayer13_conv2 (l__self___features_denseblock3_denselayer13_relu2,) {} - call_function concated_features_31 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60], 1) {} + call_function concated_features_31 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60], 1) {} call_module l__self___features_denseblock3_denselayer14_norm1 L__self___features_denseblock3_denselayer14_norm1 (concated_features_31,) {} call_module l__self___features_denseblock3_denselayer14_relu1 L__self___features_denseblock3_denselayer14_relu1 (l__self___features_denseblock3_denselayer14_norm1,) {} call_module bottleneck_output_62 L__self___features_denseblock3_denselayer14_conv1 (l__self___features_denseblock3_denselayer14_relu1,) {} call_module l__self___features_denseblock3_denselayer14_norm2 L__self___features_denseblock3_denselayer14_norm2 (bottleneck_output_62,) {} call_module l__self___features_denseblock3_denselayer14_relu2 L__self___features_denseblock3_denselayer14_relu2 (l__self___features_denseblock3_denselayer14_norm2,) {} call_module new_features_62 L__self___features_denseblock3_denselayer14_conv2 (l__self___features_denseblock3_denselayer14_relu2,) {} - call_function concated_features_32 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62], 1) {} + call_function concated_features_32 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62], 1) {} call_module l__self___features_denseblock3_denselayer15_norm1 L__self___features_denseblock3_denselayer15_norm1 (concated_features_32,) {} call_module l__self___features_denseblock3_denselayer15_relu1 L__self___features_denseblock3_denselayer15_relu1 (l__self___features_denseblock3_denselayer15_norm1,) {} call_module bottleneck_output_64 L__self___features_denseblock3_denselayer15_conv1 (l__self___features_denseblock3_denselayer15_relu1,) {} call_module l__self___features_denseblock3_denselayer15_norm2 L__self___features_denseblock3_denselayer15_norm2 (bottleneck_output_64,) {} call_module l__self___features_denseblock3_denselayer15_relu2 L__self___features_denseblock3_denselayer15_relu2 (l__self___features_denseblock3_denselayer15_norm2,) {} call_module new_features_64 L__self___features_denseblock3_denselayer15_conv2 
(l__self___features_denseblock3_denselayer15_relu2,) {} - call_function concated_features_33 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64], 1) {} + call_function concated_features_33 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64], 1) {} call_module l__self___features_denseblock3_denselayer16_norm1 L__self___features_denseblock3_denselayer16_norm1 (concated_features_33,) {} call_module l__self___features_denseblock3_denselayer16_relu1 L__self___features_denseblock3_denselayer16_relu1 (l__self___features_denseblock3_denselayer16_norm1,) {} call_module bottleneck_output_66 L__self___features_denseblock3_denselayer16_conv1 (l__self___features_denseblock3_denselayer16_relu1,) {} call_module l__self___features_denseblock3_denselayer16_norm2 L__self___features_denseblock3_denselayer16_norm2 (bottleneck_output_66,) {} call_module l__self___features_denseblock3_denselayer16_relu2 L__self___features_denseblock3_denselayer16_relu2 (l__self___features_denseblock3_denselayer16_norm2,) {} call_module new_features_66 L__self___features_denseblock3_denselayer16_conv2 (l__self___features_denseblock3_denselayer16_relu2,) {} - call_function concated_features_34 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66], 1) {} + call_function concated_features_34 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66], 1) {} call_module l__self___features_denseblock3_denselayer17_norm1 L__self___features_denseblock3_denselayer17_norm1 (concated_features_34,) {} call_module l__self___features_denseblock3_denselayer17_relu1 L__self___features_denseblock3_denselayer17_relu1 (l__self___features_denseblock3_denselayer17_norm1,) {} call_module bottleneck_output_68 L__self___features_denseblock3_denselayer17_conv1 (l__self___features_denseblock3_denselayer17_relu1,) {} call_module l__self___features_denseblock3_denselayer17_norm2 L__self___features_denseblock3_denselayer17_norm2 (bottleneck_output_68,) {} call_module l__self___features_denseblock3_denselayer17_relu2 L__self___features_denseblock3_denselayer17_relu2 (l__self___features_denseblock3_denselayer17_norm2,) {} call_module new_features_68 L__self___features_denseblock3_denselayer17_conv2 (l__self___features_denseblock3_denselayer17_relu2,) {} - call_function concated_features_35 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, 
new_features_66, new_features_68], 1) {} + call_function concated_features_35 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68], 1) {} call_module l__self___features_denseblock3_denselayer18_norm1 L__self___features_denseblock3_denselayer18_norm1 (concated_features_35,) {} call_module l__self___features_denseblock3_denselayer18_relu1 L__self___features_denseblock3_denselayer18_relu1 (l__self___features_denseblock3_denselayer18_norm1,) {} call_module bottleneck_output_70 L__self___features_denseblock3_denselayer18_conv1 (l__self___features_denseblock3_denselayer18_relu1,) {} call_module l__self___features_denseblock3_denselayer18_norm2 L__self___features_denseblock3_denselayer18_norm2 (bottleneck_output_70,) {} call_module l__self___features_denseblock3_denselayer18_relu2 L__self___features_denseblock3_denselayer18_relu2 (l__self___features_denseblock3_denselayer18_norm2,) {} call_module new_features_70 L__self___features_denseblock3_denselayer18_conv2 (l__self___features_denseblock3_denselayer18_relu2,) {} - call_function concated_features_36 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70], 1) {} + call_function concated_features_36 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70], 1) {} call_module l__self___features_denseblock3_denselayer19_norm1 L__self___features_denseblock3_denselayer19_norm1 (concated_features_36,) {} call_module l__self___features_denseblock3_denselayer19_relu1 L__self___features_denseblock3_denselayer19_relu1 (l__self___features_denseblock3_denselayer19_norm1,) {} call_module bottleneck_output_72 L__self___features_denseblock3_denselayer19_conv1 (l__self___features_denseblock3_denselayer19_relu1,) {} call_module l__self___features_denseblock3_denselayer19_norm2 L__self___features_denseblock3_denselayer19_norm2 (bottleneck_output_72,) {} call_module l__self___features_denseblock3_denselayer19_relu2 L__self___features_denseblock3_denselayer19_relu2 (l__self___features_denseblock3_denselayer19_norm2,) {} call_module new_features_72 L__self___features_denseblock3_denselayer19_conv2 (l__self___features_denseblock3_denselayer19_relu2,) {} - call_function concated_features_37 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72], 1) {} + call_function concated_features_37 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, 
new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72], 1) {} call_module l__self___features_denseblock3_denselayer20_norm1 L__self___features_denseblock3_denselayer20_norm1 (concated_features_37,) {} call_module l__self___features_denseblock3_denselayer20_relu1 L__self___features_denseblock3_denselayer20_relu1 (l__self___features_denseblock3_denselayer20_norm1,) {} call_module bottleneck_output_74 L__self___features_denseblock3_denselayer20_conv1 (l__self___features_denseblock3_denselayer20_relu1,) {} call_module l__self___features_denseblock3_denselayer20_norm2 L__self___features_denseblock3_denselayer20_norm2 (bottleneck_output_74,) {} call_module l__self___features_denseblock3_denselayer20_relu2 L__self___features_denseblock3_denselayer20_relu2 (l__self___features_denseblock3_denselayer20_norm2,) {} call_module new_features_74 L__self___features_denseblock3_denselayer20_conv2 (l__self___features_denseblock3_denselayer20_relu2,) {} - call_function concated_features_38 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74], 1) {} + call_function concated_features_38 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74], 1) {} call_module l__self___features_denseblock3_denselayer21_norm1 L__self___features_denseblock3_denselayer21_norm1 (concated_features_38,) {} call_module l__self___features_denseblock3_denselayer21_relu1 L__self___features_denseblock3_denselayer21_relu1 (l__self___features_denseblock3_denselayer21_norm1,) {} call_module bottleneck_output_76 L__self___features_denseblock3_denselayer21_conv1 (l__self___features_denseblock3_denselayer21_relu1,) {} call_module l__self___features_denseblock3_denselayer21_norm2 L__self___features_denseblock3_denselayer21_norm2 (bottleneck_output_76,) {} call_module l__self___features_denseblock3_denselayer21_relu2 L__self___features_denseblock3_denselayer21_relu2 (l__self___features_denseblock3_denselayer21_norm2,) {} call_module new_features_76 L__self___features_denseblock3_denselayer21_conv2 (l__self___features_denseblock3_denselayer21_relu2,) {} - call_function concated_features_39 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76], 1) {} + call_function concated_features_39 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, 
new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76], 1) {} call_module l__self___features_denseblock3_denselayer22_norm1 L__self___features_denseblock3_denselayer22_norm1 (concated_features_39,) {} call_module l__self___features_denseblock3_denselayer22_relu1 L__self___features_denseblock3_denselayer22_relu1 (l__self___features_denseblock3_denselayer22_norm1,) {} call_module bottleneck_output_78 L__self___features_denseblock3_denselayer22_conv1 (l__self___features_denseblock3_denselayer22_relu1,) {} call_module l__self___features_denseblock3_denselayer22_norm2 L__self___features_denseblock3_denselayer22_norm2 (bottleneck_output_78,) {} call_module l__self___features_denseblock3_denselayer22_relu2 L__self___features_denseblock3_denselayer22_relu2 (l__self___features_denseblock3_denselayer22_norm2,) {} call_module new_features_78 L__self___features_denseblock3_denselayer22_conv2 (l__self___features_denseblock3_denselayer22_relu2,) {} - call_function concated_features_40 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78], 1) {} + call_function concated_features_40 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78], 1) {} call_module l__self___features_denseblock3_denselayer23_norm1 L__self___features_denseblock3_denselayer23_norm1 (concated_features_40,) {} call_module l__self___features_denseblock3_denselayer23_relu1 L__self___features_denseblock3_denselayer23_relu1 (l__self___features_denseblock3_denselayer23_norm1,) {} call_module bottleneck_output_80 L__self___features_denseblock3_denselayer23_conv1 (l__self___features_denseblock3_denselayer23_relu1,) {} call_module l__self___features_denseblock3_denselayer23_norm2 L__self___features_denseblock3_denselayer23_norm2 (bottleneck_output_80,) {} call_module l__self___features_denseblock3_denselayer23_relu2 L__self___features_denseblock3_denselayer23_relu2 (l__self___features_denseblock3_denselayer23_norm2,) {} call_module new_features_80 L__self___features_denseblock3_denselayer23_conv2 (l__self___features_denseblock3_denselayer23_relu2,) {} - call_function concated_features_41 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78, new_features_80], 1) {} + call_function concated_features_41 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, 
new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78, new_features_80], 1) {} call_module l__self___features_denseblock3_denselayer24_norm1 L__self___features_denseblock3_denselayer24_norm1 (concated_features_41,) {} call_module l__self___features_denseblock3_denselayer24_relu1 L__self___features_denseblock3_denselayer24_relu1 (l__self___features_denseblock3_denselayer24_norm1,) {} call_module bottleneck_output_82 L__self___features_denseblock3_denselayer24_conv1 (l__self___features_denseblock3_denselayer24_relu1,) {} call_module l__self___features_denseblock3_denselayer24_norm2 L__self___features_denseblock3_denselayer24_norm2 (bottleneck_output_82,) {} call_module l__self___features_denseblock3_denselayer24_relu2 L__self___features_denseblock3_denselayer24_relu2 (l__self___features_denseblock3_denselayer24_norm2,) {} call_module new_features_82 L__self___features_denseblock3_denselayer24_conv2 (l__self___features_denseblock3_denselayer24_relu2,) {} - call_function cat_44 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78, new_features_80, new_features_82], 1) {} + call_function cat_44 ([l__self___features_transition2_pool, new_features_36, new_features_38, new_features_40, new_features_42, new_features_44, new_features_46, new_features_48, new_features_50, new_features_52, new_features_54, new_features_56, new_features_58, new_features_60, new_features_62, new_features_64, new_features_66, new_features_68, new_features_70, new_features_72, new_features_74, new_features_76, new_features_78, new_features_80, new_features_82], 1) {} call_module l__self___features_transition3_norm L__self___features_transition3_norm (cat_44,) {} call_module l__self___features_transition3_relu L__self___features_transition3_relu (l__self___features_transition3_norm,) {} call_module l__self___features_transition3_conv L__self___features_transition3_conv (l__self___features_transition3_relu,) {} call_module l__self___features_transition3_pool L__self___features_transition3_pool (l__self___features_transition3_conv,) {} - call_function concated_features_42 ([l__self___features_transition3_pool], 1) {} + call_function concated_features_42 ([l__self___features_transition3_pool], 1) {} call_module l__self___features_denseblock4_denselayer1_norm1 L__self___features_denseblock4_denselayer1_norm1 (concated_features_42,) {} call_module l__self___features_denseblock4_denselayer1_relu1 L__self___features_denseblock4_denselayer1_relu1 (l__self___features_denseblock4_denselayer1_norm1,) {} call_module bottleneck_output_84 L__self___features_denseblock4_denselayer1_conv1 (l__self___features_denseblock4_denselayer1_relu1,) {} call_module l__self___features_denseblock4_denselayer1_norm2 L__self___features_denseblock4_denselayer1_norm2 (bottleneck_output_84,) {} call_module l__self___features_denseblock4_denselayer1_relu2 L__self___features_denseblock4_denselayer1_relu2 (l__self___features_denseblock4_denselayer1_norm2,) {} call_module new_features_84 
L__self___features_denseblock4_denselayer1_conv2 (l__self___features_denseblock4_denselayer1_relu2,) {} - call_function concated_features_43 ([l__self___features_transition3_pool, new_features_84], 1) {} + call_function concated_features_43 ([l__self___features_transition3_pool, new_features_84], 1) {} call_module l__self___features_denseblock4_denselayer2_norm1 L__self___features_denseblock4_denselayer2_norm1 (concated_features_43,) {} call_module l__self___features_denseblock4_denselayer2_relu1 L__self___features_denseblock4_denselayer2_relu1 (l__self___features_denseblock4_denselayer2_norm1,) {} call_module bottleneck_output_86 L__self___features_denseblock4_denselayer2_conv1 (l__self___features_denseblock4_denselayer2_relu1,) {} call_module l__self___features_denseblock4_denselayer2_norm2 L__self___features_denseblock4_denselayer2_norm2 (bottleneck_output_86,) {} call_module l__self___features_denseblock4_denselayer2_relu2 L__self___features_denseblock4_denselayer2_relu2 (l__self___features_denseblock4_denselayer2_norm2,) {} call_module new_features_86 L__self___features_denseblock4_denselayer2_conv2 (l__self___features_denseblock4_denselayer2_relu2,) {} - call_function concated_features_44 ([l__self___features_transition3_pool, new_features_84, new_features_86], 1) {} + call_function concated_features_44 ([l__self___features_transition3_pool, new_features_84, new_features_86], 1) {} call_module l__self___features_denseblock4_denselayer3_norm1 L__self___features_denseblock4_denselayer3_norm1 (concated_features_44,) {} call_module l__self___features_denseblock4_denselayer3_relu1 L__self___features_denseblock4_denselayer3_relu1 (l__self___features_denseblock4_denselayer3_norm1,) {} call_module bottleneck_output_88 L__self___features_denseblock4_denselayer3_conv1 (l__self___features_denseblock4_denselayer3_relu1,) {} call_module l__self___features_denseblock4_denselayer3_norm2 L__self___features_denseblock4_denselayer3_norm2 (bottleneck_output_88,) {} call_module l__self___features_denseblock4_denselayer3_relu2 L__self___features_denseblock4_denselayer3_relu2 (l__self___features_denseblock4_denselayer3_norm2,) {} call_module new_features_88 L__self___features_denseblock4_denselayer3_conv2 (l__self___features_denseblock4_denselayer3_relu2,) {} - call_function concated_features_45 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88], 1) {} + call_function concated_features_45 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88], 1) {} call_module l__self___features_denseblock4_denselayer4_norm1 L__self___features_denseblock4_denselayer4_norm1 (concated_features_45,) {} call_module l__self___features_denseblock4_denselayer4_relu1 L__self___features_denseblock4_denselayer4_relu1 (l__self___features_denseblock4_denselayer4_norm1,) {} call_module bottleneck_output_90 L__self___features_denseblock4_denselayer4_conv1 (l__self___features_denseblock4_denselayer4_relu1,) {} call_module l__self___features_denseblock4_denselayer4_norm2 L__self___features_denseblock4_denselayer4_norm2 (bottleneck_output_90,) {} call_module l__self___features_denseblock4_denselayer4_relu2 L__self___features_denseblock4_denselayer4_relu2 (l__self___features_denseblock4_denselayer4_norm2,) {} call_module new_features_90 L__self___features_denseblock4_denselayer4_conv2 (l__self___features_denseblock4_denselayer4_relu2,) {} - call_function concated_features_46 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, 
new_features_90], 1) {} + call_function concated_features_46 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90], 1) {} call_module l__self___features_denseblock4_denselayer5_norm1 L__self___features_denseblock4_denselayer5_norm1 (concated_features_46,) {} call_module l__self___features_denseblock4_denselayer5_relu1 L__self___features_denseblock4_denselayer5_relu1 (l__self___features_denseblock4_denselayer5_norm1,) {} call_module bottleneck_output_92 L__self___features_denseblock4_denselayer5_conv1 (l__self___features_denseblock4_denselayer5_relu1,) {} call_module l__self___features_denseblock4_denselayer5_norm2 L__self___features_denseblock4_denselayer5_norm2 (bottleneck_output_92,) {} call_module l__self___features_denseblock4_denselayer5_relu2 L__self___features_denseblock4_denselayer5_relu2 (l__self___features_denseblock4_denselayer5_norm2,) {} call_module new_features_92 L__self___features_denseblock4_denselayer5_conv2 (l__self___features_denseblock4_denselayer5_relu2,) {} - call_function concated_features_47 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92], 1) {} + call_function concated_features_47 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92], 1) {} call_module l__self___features_denseblock4_denselayer6_norm1 L__self___features_denseblock4_denselayer6_norm1 (concated_features_47,) {} call_module l__self___features_denseblock4_denselayer6_relu1 L__self___features_denseblock4_denselayer6_relu1 (l__self___features_denseblock4_denselayer6_norm1,) {} call_module bottleneck_output_94 L__self___features_denseblock4_denselayer6_conv1 (l__self___features_denseblock4_denselayer6_relu1,) {} call_module l__self___features_denseblock4_denselayer6_norm2 L__self___features_denseblock4_denselayer6_norm2 (bottleneck_output_94,) {} call_module l__self___features_denseblock4_denselayer6_relu2 L__self___features_denseblock4_denselayer6_relu2 (l__self___features_denseblock4_denselayer6_norm2,) {} call_module new_features_94 L__self___features_denseblock4_denselayer6_conv2 (l__self___features_denseblock4_denselayer6_relu2,) {} - call_function concated_features_48 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94], 1) {} + call_function concated_features_48 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94], 1) {} call_module l__self___features_denseblock4_denselayer7_norm1 L__self___features_denseblock4_denselayer7_norm1 (concated_features_48,) {} call_module l__self___features_denseblock4_denselayer7_relu1 L__self___features_denseblock4_denselayer7_relu1 (l__self___features_denseblock4_denselayer7_norm1,) {} call_module bottleneck_output_96 L__self___features_denseblock4_denselayer7_conv1 (l__self___features_denseblock4_denselayer7_relu1,) {} call_module l__self___features_denseblock4_denselayer7_norm2 L__self___features_denseblock4_denselayer7_norm2 (bottleneck_output_96,) {} call_module l__self___features_denseblock4_denselayer7_relu2 L__self___features_denseblock4_denselayer7_relu2 (l__self___features_denseblock4_denselayer7_norm2,) {} call_module new_features_96 L__self___features_denseblock4_denselayer7_conv2 (l__self___features_denseblock4_denselayer7_relu2,) {} - call_function concated_features_49 
([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96], 1) {} + call_function concated_features_49 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96], 1) {} call_module l__self___features_denseblock4_denselayer8_norm1 L__self___features_denseblock4_denselayer8_norm1 (concated_features_49,) {} call_module l__self___features_denseblock4_denselayer8_relu1 L__self___features_denseblock4_denselayer8_relu1 (l__self___features_denseblock4_denselayer8_norm1,) {} call_module bottleneck_output_98 L__self___features_denseblock4_denselayer8_conv1 (l__self___features_denseblock4_denselayer8_relu1,) {} call_module l__self___features_denseblock4_denselayer8_norm2 L__self___features_denseblock4_denselayer8_norm2 (bottleneck_output_98,) {} call_module l__self___features_denseblock4_denselayer8_relu2 L__self___features_denseblock4_denselayer8_relu2 (l__self___features_denseblock4_denselayer8_norm2,) {} call_module new_features_98 L__self___features_denseblock4_denselayer8_conv2 (l__self___features_denseblock4_denselayer8_relu2,) {} - call_function concated_features_50 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98], 1) {} + call_function concated_features_50 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98], 1) {} call_module l__self___features_denseblock4_denselayer9_norm1 L__self___features_denseblock4_denselayer9_norm1 (concated_features_50,) {} call_module l__self___features_denseblock4_denselayer9_relu1 L__self___features_denseblock4_denselayer9_relu1 (l__self___features_denseblock4_denselayer9_norm1,) {} call_module bottleneck_output_100 L__self___features_denseblock4_denselayer9_conv1 (l__self___features_denseblock4_denselayer9_relu1,) {} call_module l__self___features_denseblock4_denselayer9_norm2 L__self___features_denseblock4_denselayer9_norm2 (bottleneck_output_100,) {} call_module l__self___features_denseblock4_denselayer9_relu2 L__self___features_denseblock4_denselayer9_relu2 (l__self___features_denseblock4_denselayer9_norm2,) {} call_module new_features_100 L__self___features_denseblock4_denselayer9_conv2 (l__self___features_denseblock4_denselayer9_relu2,) {} - call_function concated_features_51 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100], 1) {} + call_function concated_features_51 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100], 1) {} call_module l__self___features_denseblock4_denselayer10_norm1 L__self___features_denseblock4_denselayer10_norm1 (concated_features_51,) {} call_module l__self___features_denseblock4_denselayer10_relu1 L__self___features_denseblock4_denselayer10_relu1 (l__self___features_denseblock4_denselayer10_norm1,) {} call_module bottleneck_output_102 L__self___features_denseblock4_denselayer10_conv1 (l__self___features_denseblock4_denselayer10_relu1,) {} call_module l__self___features_denseblock4_denselayer10_norm2 
L__self___features_denseblock4_denselayer10_norm2 (bottleneck_output_102,) {} call_module l__self___features_denseblock4_denselayer10_relu2 L__self___features_denseblock4_denselayer10_relu2 (l__self___features_denseblock4_denselayer10_norm2,) {} call_module new_features_102 L__self___features_denseblock4_denselayer10_conv2 (l__self___features_denseblock4_denselayer10_relu2,) {} - call_function concated_features_52 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102], 1) {} + call_function concated_features_52 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102], 1) {} call_module l__self___features_denseblock4_denselayer11_norm1 L__self___features_denseblock4_denselayer11_norm1 (concated_features_52,) {} call_module l__self___features_denseblock4_denselayer11_relu1 L__self___features_denseblock4_denselayer11_relu1 (l__self___features_denseblock4_denselayer11_norm1,) {} call_module bottleneck_output_104 L__self___features_denseblock4_denselayer11_conv1 (l__self___features_denseblock4_denselayer11_relu1,) {} call_module l__self___features_denseblock4_denselayer11_norm2 L__self___features_denseblock4_denselayer11_norm2 (bottleneck_output_104,) {} call_module l__self___features_denseblock4_denselayer11_relu2 L__self___features_denseblock4_denselayer11_relu2 (l__self___features_denseblock4_denselayer11_norm2,) {} call_module new_features_104 L__self___features_denseblock4_denselayer11_conv2 (l__self___features_denseblock4_denselayer11_relu2,) {} - call_function concated_features_53 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104], 1) {} + call_function concated_features_53 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104], 1) {} call_module l__self___features_denseblock4_denselayer12_norm1 L__self___features_denseblock4_denselayer12_norm1 (concated_features_53,) {} call_module l__self___features_denseblock4_denselayer12_relu1 L__self___features_denseblock4_denselayer12_relu1 (l__self___features_denseblock4_denselayer12_norm1,) {} call_module bottleneck_output_106 L__self___features_denseblock4_denselayer12_conv1 (l__self___features_denseblock4_denselayer12_relu1,) {} call_module l__self___features_denseblock4_denselayer12_norm2 L__self___features_denseblock4_denselayer12_norm2 (bottleneck_output_106,) {} call_module l__self___features_denseblock4_denselayer12_relu2 L__self___features_denseblock4_denselayer12_relu2 (l__self___features_denseblock4_denselayer12_norm2,) {} call_module new_features_106 L__self___features_denseblock4_denselayer12_conv2 (l__self___features_denseblock4_denselayer12_relu2,) {} - call_function concated_features_54 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106], 1) {} + call_function concated_features_54 
([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106], 1) {} call_module l__self___features_denseblock4_denselayer13_norm1 L__self___features_denseblock4_denselayer13_norm1 (concated_features_54,) {} call_module l__self___features_denseblock4_denselayer13_relu1 L__self___features_denseblock4_denselayer13_relu1 (l__self___features_denseblock4_denselayer13_norm1,) {} call_module bottleneck_output_108 L__self___features_denseblock4_denselayer13_conv1 (l__self___features_denseblock4_denselayer13_relu1,) {} call_module l__self___features_denseblock4_denselayer13_norm2 L__self___features_denseblock4_denselayer13_norm2 (bottleneck_output_108,) {} call_module l__self___features_denseblock4_denselayer13_relu2 L__self___features_denseblock4_denselayer13_relu2 (l__self___features_denseblock4_denselayer13_norm2,) {} call_module new_features_108 L__self___features_denseblock4_denselayer13_conv2 (l__self___features_denseblock4_denselayer13_relu2,) {} - call_function concated_features_55 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108], 1) {} + call_function concated_features_55 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108], 1) {} call_module l__self___features_denseblock4_denselayer14_norm1 L__self___features_denseblock4_denselayer14_norm1 (concated_features_55,) {} call_module l__self___features_denseblock4_denselayer14_relu1 L__self___features_denseblock4_denselayer14_relu1 (l__self___features_denseblock4_denselayer14_norm1,) {} call_module bottleneck_output_110 L__self___features_denseblock4_denselayer14_conv1 (l__self___features_denseblock4_denselayer14_relu1,) {} call_module l__self___features_denseblock4_denselayer14_norm2 L__self___features_denseblock4_denselayer14_norm2 (bottleneck_output_110,) {} call_module l__self___features_denseblock4_denselayer14_relu2 L__self___features_denseblock4_denselayer14_relu2 (l__self___features_denseblock4_denselayer14_norm2,) {} call_module new_features_110 L__self___features_denseblock4_denselayer14_conv2 (l__self___features_denseblock4_denselayer14_relu2,) {} - call_function concated_features_56 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110], 1) {} + call_function concated_features_56 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110], 1) {} call_module l__self___features_denseblock4_denselayer15_norm1 L__self___features_denseblock4_denselayer15_norm1 (concated_features_56,) {} call_module l__self___features_denseblock4_denselayer15_relu1 
L__self___features_denseblock4_denselayer15_relu1 (l__self___features_denseblock4_denselayer15_norm1,) {} call_module bottleneck_output_112 L__self___features_denseblock4_denselayer15_conv1 (l__self___features_denseblock4_denselayer15_relu1,) {} call_module l__self___features_denseblock4_denselayer15_norm2 L__self___features_denseblock4_denselayer15_norm2 (bottleneck_output_112,) {} call_module l__self___features_denseblock4_denselayer15_relu2 L__self___features_denseblock4_denselayer15_relu2 (l__self___features_denseblock4_denselayer15_norm2,) {} call_module new_features_112 L__self___features_denseblock4_denselayer15_conv2 (l__self___features_denseblock4_denselayer15_relu2,) {} - call_function concated_features_57 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110, new_features_112], 1) {} + call_function concated_features_57 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110, new_features_112], 1) {} call_module l__self___features_denseblock4_denselayer16_norm1 L__self___features_denseblock4_denselayer16_norm1 (concated_features_57,) {} call_module l__self___features_denseblock4_denselayer16_relu1 L__self___features_denseblock4_denselayer16_relu1 (l__self___features_denseblock4_denselayer16_norm1,) {} call_module bottleneck_output_114 L__self___features_denseblock4_denselayer16_conv1 (l__self___features_denseblock4_denselayer16_relu1,) {} call_module l__self___features_denseblock4_denselayer16_norm2 L__self___features_denseblock4_denselayer16_norm2 (bottleneck_output_114,) {} call_module l__self___features_denseblock4_denselayer16_relu2 L__self___features_denseblock4_denselayer16_relu2 (l__self___features_denseblock4_denselayer16_norm2,) {} call_module new_features_114 L__self___features_denseblock4_denselayer16_conv2 (l__self___features_denseblock4_denselayer16_relu2,) {} - call_function cat_61 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110, new_features_112, new_features_114], 1) {} + call_function cat_61 ([l__self___features_transition3_pool, new_features_84, new_features_86, new_features_88, new_features_90, new_features_92, new_features_94, new_features_96, new_features_98, new_features_100, new_features_102, new_features_104, new_features_106, new_features_108, new_features_110, new_features_112, new_features_114], 1) {} call_module features L__self___features_norm5 (cat_61,) {} - call_function out (features,) {'inplace': True} - call_function out_1 (out, (1, 1)) {} - call_function out_2 (out_1, 1) {} + call_function out (features,) {'inplace': True} + call_function out_1 (out, (1, 1)) {} + call_function out_2 (out_1, 1) {} call_module out_3 L__self___classifier (out_2,) {} output output output ((out_3,),) {} @@ -1483,7 +1483,7 @@ data-dependent control flow. 
Consider the function below, where the line ------------- ------ ------------------------------------------------------ ----------- -------- placeholder l_a_ L_a_ () {} placeholder l_b_ L_b_ () {} - call_function abs_1 (l_a_,) {} + call_function abs_1 (l_a_,) {} call_function add (abs_1, 1) {} call_function x (l_a_, add) {} call_method sum_1 sum (l_b_,) {} @@ -1565,7 +1565,7 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Ops per Graph: Ops 1: - + @@ -1598,37 +1598,37 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Object Weakref: None Guarded Class Weakref: None Guard 4: + Name: "G['torch'].abs" + Source: global + Create Function: FUNCTION_MATCH + Guard Types: None + Code List: None + Object Weakref: None + Guarded Class Weakref: None + Guard 5: + Name: '' + Source: shape_env + Create Function: SHAPE_ENV + Guard Types: None + Code List: None + Object Weakref: None + Guarded Class Weakref: None + Guard 6: Name: "L['b']" Source: local Create Function: TENSOR_MATCH Guard Types: ['TENSOR_MATCH'] Code List: ["hasattr(L['b'], '_dynamo_dynamic_indices') == False"] - Object Weakref: - Guarded Class Weakref: - Guard 5: + Object Weakref: + Guarded Class Weakref: + Guard 7: Name: '' Source: global Create Function: BACKEND_MATCH Guard Types: ['BACKEND_MATCH'] - Code List: ['___check_current_backend(139697687467712)'] + Code List: ['___check_current_backend(140197382899168)'] Object Weakref: None Guarded Class Weakref: None - Guard 6: - Name: "G['torch'].abs" - Source: global - Create Function: FUNCTION_MATCH - Guard Types: None - Code List: None - Object Weakref: None - Guarded Class Weakref: None - Guard 7: - Name: "L['a']" - Source: local - Create Function: TENSOR_MATCH - Guard Types: ['TENSOR_MATCH'] - Code List: ["hasattr(L['a'], '_dynamo_dynamic_indices') == False"] - Object Weakref: - Guarded Class Weakref: Guard 8: Name: '' Source: global @@ -1638,14 +1638,6 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Object Weakref: None Guarded Class Weakref: None Guard 9: - Name: '' - Source: shape_env - Create Function: SHAPE_ENV - Guard Types: None - Code List: None - Object Weakref: None - Guarded Class Weakref: None - Guard 10: Name: '' Source: global Create Function: DETERMINISTIC_ALGORITHMS @@ -1653,6 +1645,14 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Code List: None Object Weakref: None Guarded Class Weakref: None + Guard 10: + Name: "L['a']" + Source: local + Create Function: TENSOR_MATCH + Guard Types: ['TENSOR_MATCH'] + Code List: ["hasattr(L['a'], '_dynamo_dynamic_indices') == False"] + Object Weakref: + Guarded Class Weakref: Guard 11: Name: '' Source: global @@ -1671,44 +1671,44 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Guarded Class Weakref: None Guard 13: Name: '' - Source: global - Create Function: BACKEND_MATCH - Guard Types: ['BACKEND_MATCH'] - Code List: ['___check_current_backend(139697687467712)'] + Source: shape_env + Create Function: SHAPE_ENV + Guard Types: None + Code List: None Object Weakref: None Guarded Class Weakref: None Guard 14: Name: '' Source: global - Create Function: GRAD_MODE - Guard Types: None - Code List: None + Create Function: BACKEND_MATCH + Guard Types: ['BACKEND_MATCH'] + Code List: ['___check_current_backend(140197382899168)'] Object Weakref: None Guarded Class Weakref: None Guard 15: - Name: "L['x']" + Name: "L['b']" Source: local Create Function: TENSOR_MATCH Guard 
Types: ['TENSOR_MATCH'] - Code List: ["hasattr(L['x'], '_dynamo_dynamic_indices') == False"] - Object Weakref: - Guarded Class Weakref: + Code List: ["hasattr(L['b'], '_dynamo_dynamic_indices') == False"] + Object Weakref: + Guarded Class Weakref: Guard 16: Name: '' - Source: shape_env - Create Function: SHAPE_ENV + Source: global + Create Function: GRAD_MODE Guard Types: None Code List: None Object Weakref: None Guarded Class Weakref: None Guard 17: - Name: "L['b']" + Name: "L['x']" Source: local Create Function: TENSOR_MATCH Guard Types: ['TENSOR_MATCH'] - Code List: ["hasattr(L['b'], '_dynamo_dynamic_indices') == False"] - Object Weakref: - Guarded Class Weakref: + Code List: ["hasattr(L['x'], '_dynamo_dynamic_indices') == False"] + Object Weakref: + Guarded Class Weakref: Guard 18: Name: '' Source: global @@ -1720,8 +1720,8 @@ We can see where TorchDynamo breaks the graph by using ``torch._dynamo.explain`` Compile Times: TorchDynamo compilation metrics: Function Runtimes (s) ------------------------------- -------------- - _compile..compile_inner 0.0113, 0.0068 - OutputGraph.call_user_compiler 0.0001, 0.0001 + _compile..compile_inner 0.0110, 0.0066 + OutputGraph.call_user_compiler 0.0001, 0.0000 @@ -1855,7 +1855,7 @@ with FX graphs. We hope that you will give ``torch.compile`` a try! .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 6 minutes 15.332 seconds) + **Total running time of the script:** ( 6 minutes 10.264 seconds) .. _sphx_glr_download_intermediate_torch_compile_tutorial.py: diff --git a/_sources/intermediate/torchvision_tutorial.rst.txt b/_sources/intermediate/torchvision_tutorial.rst.txt index 2a4b22e3f8..aa4ebc4a90 100644 --- a/_sources/intermediate/torchvision_tutorial.rst.txt +++ b/_sources/intermediate/torchvision_tutorial.rst.txt @@ -157,7 +157,7 @@ Here is one example of a pair of images and segmentation masks .. code-block:: none - + @@ -321,12 +321,12 @@ way of doing it: Downloading: "https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth" to /var/lib/ci-user/.cache/torch/hub/checkpoints/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth 0%| | 0.00/160M [00:00 + @@ -851,7 +853,7 @@ the torchvision repository. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 49.967 seconds) + **Total running time of the script:** ( 0 minutes 49.667 seconds) .. _sphx_glr_download_intermediate_torchvision_tutorial.py: diff --git a/_sources/prototype/maskedtensor_overview.rst.txt b/_sources/prototype/maskedtensor_overview.rst.txt index ae31775523..a03fef9dcc 100644 --- a/_sources/prototype/maskedtensor_overview.rst.txt +++ b/_sources/prototype/maskedtensor_overview.rst.txt @@ -771,7 +771,7 @@ to see how MaskedTensor enables sparsity and the different storage formats we cu .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.051 seconds) + **Total running time of the script:** ( 0 minutes 0.048 seconds) .. 
_sphx_glr_download_prototype_maskedtensor_overview.py:
diff --git a/_sources/prototype/sg_execution_times.rst.txt b/_sources/prototype/sg_execution_times.rst.txt
index 9aaaa6c811..4ca6de7c4d 100644
--- a/_sources/prototype/sg_execution_times.rst.txt
+++ b/_sources/prototype/sg_execution_times.rst.txt
@@ -5,17 +5,17 @@ Computation times
=================
-**00:00.055** total execution time for **prototype** files:
+**00:00.052** total execution time for **prototype** files:
+-----------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_prototype_maskedtensor_overview.py` (``maskedtensor_overview.py``) | 00:00.051 | 0.0 MB |
+| :ref:`sphx_glr_prototype_maskedtensor_overview.py` (``maskedtensor_overview.py``) | 00:00.048 | 0.0 MB |
+-----------------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_prototype_gpu_quantization_torchao_tutorial.py` (``gpu_quantization_torchao_tutorial.py``) | 00:00.001 | 0.0 MB |
+-----------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_prototype_maskedtensor_adagrad.py` (``maskedtensor_adagrad.py``) | 00:00.001 | 0.0 MB |
-+-----------------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_prototype_numeric_suite_tutorial.py` (``numeric_suite_tutorial.py``) | 00:00.001 | 0.0 MB |
+-----------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_prototype_maskedtensor_adagrad.py` (``maskedtensor_adagrad.py``) | 00:00.001 | 0.0 MB |
++-----------------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_prototype_maskedtensor_advanced_semantics.py` (``maskedtensor_advanced_semantics.py``) | 00:00.001 | 0.0 MB |
+-----------------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_prototype_maskedtensor_sparsity.py` (``maskedtensor_sparsity.py``) | 00:00.000 | 0.0 MB |
diff --git a/_sources/recipes/compiling_optimizer_lr_scheduler.rst.txt b/_sources/recipes/compiling_optimizer_lr_scheduler.rst.txt
index 60d53bea26..264c688f01 100644
--- a/_sources/recipes/compiling_optimizer_lr_scheduler.rst.txt
+++ b/_sources/recipes/compiling_optimizer_lr_scheduler.rst.txt
@@ -167,31 +167,31 @@ LR in a tensor.
..
code-block:: none - V0710 20:34:52.145000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 - V0710 20:34:52.145000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): - V0710 20:34:52.145000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - ___key_to_id(L['self'].state) == [139697658389552,139697658381872,139699618042304,139699618052544,139699618038144,139699618038864,139699618052064,139699618047184,139699618039984,139699618043984] - V0710 20:34:55.450000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 - V0710 20:34:55.450000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): - V0710 20:34:55.450000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 - V0710 20:34:55.450000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_139699628395200_139697656684496_c59']() is not None - V0710 20:34:58.016000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 - V0710 20:34:58.016000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): - V0710 20:34:58.016000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 - V0710 20:34:58.016000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 - V0710 20:34:58.016000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_139699628395200_139697656684496_c59']() is not None - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.006000000000000001 - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 - V0710 20:35:00.566000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_139699628395200_139697656684496_c59']() is not None - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.007333333333333335 - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.006000000000000001 - V0710 20:35:03.105000 139706689843840 
torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 - V0710 20:35:03.105000 139706689843840 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_139699628395200_139697656684496_c59']() is not None + V0715 17:19:09.565000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 + V0715 17:19:09.565000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): + V0715 17:19:09.565000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - ___key_to_id(L['self'].state) == [140197331170656,140197331160736,140197330074128,140197330070048,140197330064288,140197330060688,140197330073888,140197330063808,140197330074448,140197330067408] + V0715 17:19:12.850000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 + V0715 17:19:12.850000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): + V0715 17:19:12.850000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 + V0715 17:19:12.850000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_140197482626688_140197351566336_c59']() is not None + V0715 17:19:15.378000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 + V0715 17:19:15.378000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): + V0715 17:19:15.378000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 + V0715 17:19:15.378000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 + V0715 17:19:15.378000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_140197482626688_140197351566336_c59']() is not None + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] triggered by the following guard failure(s): + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.006000000000000001 + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 + V0715 17:19:17.898000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_140197482626688_140197351566336_c59']() is not None + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] Recompiling function step in /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/optim/adam.py:135 + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] 
[__recompiles] triggered by the following guard failure(s): + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.007333333333333335 + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.006000000000000001 + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.004666666666666667 + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - L['self'].param_groups[0]['lr'] == 0.003333333333333333 + V0715 17:19:20.418000 140206346437248 torch/_dynamo/guards.py:1425] [__recompiles] - G['__optimizer_140197482626688_140197351566336_c59']() is not None @@ -219,7 +219,7 @@ See also: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 16.970 seconds) + **Total running time of the script:** ( 0 minutes 16.786 seconds) .. _sphx_glr_download_recipes_compiling_optimizer_lr_scheduler.py: diff --git a/_sources/recipes/recipes/changing_default_device.rst.txt b/_sources/recipes/recipes/changing_default_device.rst.txt index 558ef007c0..69178f3e80 100644 --- a/_sources/recipes/recipes/changing_default_device.rst.txt +++ b/_sources/recipes/recipes/changing_default_device.rst.txt @@ -128,7 +128,7 @@ is causing problems for you, please comment on .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.100 seconds) + **Total running time of the script:** ( 0 minutes 0.187 seconds) .. _sphx_glr_download_recipes_recipes_changing_default_device.py: diff --git a/_sources/recipes/recipes/module_load_state_dict_tips.rst.txt b/_sources/recipes/recipes/module_load_state_dict_tips.rst.txt index 12309b1d2b..a560d191bd 100644 --- a/_sources/recipes/recipes/module_load_state_dict_tips.rst.txt +++ b/_sources/recipes/recipes/module_load_state_dict_tips.rst.txt @@ -157,7 +157,7 @@ loaded into CPU RAM, which can be undesirable when: .. code-block:: none - loading time without mmap=0.013105154037475586 + loading time without mmap=0.011459827423095703 @@ -189,7 +189,7 @@ storages will be memory-mapped. .. code-block:: none - loading time with mmap=0.0009913444519042969 + loading time with mmap=0.0009703636169433594 @@ -352,7 +352,7 @@ be used to aid when loading a model from a checkpoint. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.393 seconds) + **Total running time of the script:** ( 0 minutes 0.401 seconds) .. _sphx_glr_download_recipes_recipes_module_load_state_dict_tips.py: diff --git a/_sources/recipes/recipes/reasoning_about_shapes.rst.txt b/_sources/recipes/recipes/reasoning_about_shapes.rst.txt index 8628fb2938..bcb3d36506 100644 --- a/_sources/recipes/recipes/reasoning_about_shapes.rst.txt +++ b/_sources/recipes/recipes/reasoning_about_shapes.rst.txt @@ -60,7 +60,7 @@ of a layer without materializing any data. .. code-block:: none tensor(..., device='meta', size=(2, 5, 9, 9), grad_fn=) - Time taken: 0.00010952199954772368 + Time taken: 0.00013915199997427408 @@ -94,7 +94,7 @@ inputs will not significantly alter the time taken for shape computation. 
tensor(..., device='meta', size=(1024, 5, 65535, 65535), grad_fn=) - Time taken: 7.485100286430679e-05 + Time taken: 7.251199986058054e-05 diff --git a/_sources/recipes/recipes/swap_tensors.rst.txt b/_sources/recipes/recipes/swap_tensors.rst.txt index 2aaa2d45d6..39c61ec5c8 100644 --- a/_sources/recipes/recipes/swap_tensors.rst.txt +++ b/_sources/recipes/recipes/swap_tensors.rst.txt @@ -217,8 +217,8 @@ of the subclass' payload (``elem``) does not change. .. code-block:: none - Before: id(m.weight)=140696568569168, id(m.bias)=140696568570368 - After: id(m.weight)=140696568569168, id(m.bias)=140696568570368 + Before: id(m.weight)=140047952326592, id(m.bias)=140047952316112 + After: id(m.weight)=140047952326592, id(m.bias)=140047952316112 m.weight.dtype: torch.bfloat16 m.weight.elem.dtype: torch.float32 m.bias.dtype: torch.bfloat16 @@ -259,8 +259,8 @@ the ``dtype`` of the payload is properly converted. .. code-block:: none - Before: id(m.weight)=140696568564928, id(m.bias)=140697881523152 - After: id(m.weight)=140696568564928, id(m.bias)=140697881523152 + Before: id(m.weight)=140047952315792, id(m.bias)=140048951271136 + After: id(m.weight)=140047952315792, id(m.bias)=140048951271136 m.weight.dtype: torch.bfloat16 m.weight.elem.dtype: torch.bfloat16 m.bias.dtype: torch.bfloat16 @@ -392,7 +392,7 @@ for biases, we want to preserve the properties of the tensor in the ``state_dict .. code-block:: none - Before: id(weight)=140696568564608, id(bias)=140696562752768 + Before: id(weight)=140047952325952, id(bias)=140047952328032 m.state_dict() before load_state_dict(): OrderedDict([('weight', MyQuantizedLinearWeight(tensor(..., device='meta', size=(5, 3)), scale=0.5)), ('bias', tensor(..., device='meta', size=(5,)))]) state_dict: @@ -401,7 +401,7 @@ for biases, we want to preserve the properties of the tensor in the ``state_dict [ 0.2932, -0.3519, -0.5715], [-0.2231, -0.4428, 0.4737], [ 0.1663, 0.2391, 0.1826]])), ('bias', tensor([-0.0100, 0.4518, -0.4102, 0.0364, -0.3941]))]) - After: id(weight)=140696568564608, id(bias)=140696562752768 + After: id(weight)=140047952325952, id(bias)=140047952328032 m.state_dict() after load_state_dict(): OrderedDict([('weight', MyQuantizedLinearWeight(tensor([[ 0.2430, 0.5155, 0.3337], [-0.2524, 0.3333, 0.1033], @@ -431,7 +431,7 @@ use the two new extension points that are gated by .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.018 seconds) + **Total running time of the script:** ( 0 minutes 0.020 seconds) .. _sphx_glr_download_recipes_recipes_swap_tensors.py: diff --git a/_sources/recipes/torch_compile_user_defined_triton_kernel_tutorial.rst.txt b/_sources/recipes/torch_compile_user_defined_triton_kernel_tutorial.rst.txt index 3d73a1a726..afcbb26ea2 100644 --- a/_sources/recipes/torch_compile_user_defined_triton_kernel_tutorial.rst.txt +++ b/_sources/recipes/torch_compile_user_defined_triton_kernel_tutorial.rst.txt @@ -249,7 +249,7 @@ See Also .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.481 seconds) + **Total running time of the script:** ( 0 minutes 1.459 seconds) .. _sphx_glr_download_recipes_torch_compile_user_defined_triton_kernel_tutorial.py: diff --git a/advanced/coding_ddpg.html b/advanced/coding_ddpg.html index 3084c7be98..4d9b2b481e 100644 --- a/advanced/coding_ddpg.html +++ b/advanced/coding_ddpg.html @@ -1558,26 +1558,26 @@

Time to train the policy
  0%|          | 0/10000 [00:00<?, ?it/s]
-  8%|8         | 800/10000 [00:00<00:08, 1071.38it/s]
- 16%|#6        | 1600/10000 [00:03<00:20, 407.89it/s]
- 24%|##4       | 2400/10000 [00:04<00:13, 555.14it/s]
- 32%|###2      | 3200/10000 [00:05<00:10, 672.45it/s]
- 40%|####      | 4000/10000 [00:05<00:07, 758.59it/s]
- 48%|####8     | 4800/10000 [00:06<00:06, 823.20it/s]
- 56%|#####6    | 5600/10000 [00:07<00:05, 863.77it/s]
-reward: -2.09 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.69/6.20, grad norm= 37.11, loss_value= 323.10, loss_actor= 14.83, target value: -16.38:  56%|#####6    | 5600/10000 [00:08<00:05, 863.77it/s]
-reward: -2.09 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.69/6.20, grad norm= 37.11, loss_value= 323.10, loss_actor= 14.83, target value: -16.38:  64%|######4   | 6400/10000 [00:09<00:05, 650.90it/s]
-reward: -0.14 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.66/5.98, grad norm= 175.37, loss_value= 309.10, loss_actor= 13.68, target value: -16.84:  64%|######4   | 6400/10000 [00:10<00:05, 650.90it/s]
-reward: -0.14 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.66/5.98, grad norm= 175.37, loss_value= 309.10, loss_actor= 13.68, target value: -16.84:  72%|#######2  | 7200/10000 [00:12<00:05, 492.74it/s]
-reward: -3.13 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.46/6.23, grad norm= 93.87, loss_value= 320.85, loss_actor= 15.84, target value: -15.84:  72%|#######2  | 7200/10000 [00:12<00:05, 492.74it/s]
-reward: -3.13 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.46/6.23, grad norm= 93.87, loss_value= 320.85, loss_actor= 15.84, target value: -15.84:  80%|########  | 8000/10000 [00:14<00:04, 423.87it/s]
-reward: -4.76 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.58/5.59, grad norm= 122.85, loss_value= 264.06, loss_actor= 18.60, target value: -16.97:  80%|########  | 8000/10000 [00:15<00:04, 423.87it/s]
-reward: -4.76 (r0 = -3.53), reward eval: reward:  0.00, reward normalized=-2.58/5.59, grad norm= 122.85, loss_value= 264.06, loss_actor= 18.60, target value: -16.97:  88%|########8 | 8800/10000 [00:16<00:03, 388.78it/s]
-reward: -5.06 (r0 = -3.53), reward eval: reward: -3.39, reward normalized=-3.02/5.46, grad norm= 64.96, loss_value= 254.81, loss_actor= 16.05, target value: -19.83:  88%|########8 | 8800/10000 [00:19<00:03, 388.78it/s]
-reward: -5.06 (r0 = -3.53), reward eval: reward: -3.39, reward normalized=-3.02/5.46, grad norm= 64.96, loss_value= 254.81, loss_actor= 16.05, target value: -19.83:  96%|#########6| 9600/10000 [00:21<00:01, 295.31it/s]
-reward: -2.03 (r0 = -3.53), reward eval: reward: -3.39, reward normalized=-2.73/5.39, grad norm= 61.65, loss_value= 302.91, loss_actor= 15.41, target value: -19.70:  96%|#########6| 9600/10000 [00:21<00:01, 295.31it/s]
-reward: -2.03 (r0 = -3.53), reward eval: reward: -3.39, reward normalized=-2.73/5.39, grad norm= 61.65, loss_value= 302.91, loss_actor= 15.41, target value: -19.70: : 10400it [00:24, 270.03it/s]
-reward: -3.56 (r0 = -3.53), reward eval: reward: -3.39, reward normalized=-3.00/4.17, grad norm= 83.71, loss_value= 141.01, loss_actor= 19.28, target value: -21.77: : 10400it [00:25, 270.03it/s]
+  8%|8         | 800/10000 [00:00<00:08, 1081.90it/s]
+ 16%|#6        | 1600/10000 [00:03<00:20, 409.91it/s]
+ 24%|##4       | 2400/10000 [00:04<00:13, 556.99it/s]
+ 32%|###2      | 3200/10000 [00:05<00:10, 672.82it/s]
+ 40%|####      | 4000/10000 [00:05<00:07, 758.91it/s]
+ 48%|####8     | 4800/10000 [00:06<00:06, 823.27it/s]
+ 56%|#####6    | 5600/10000 [00:07<00:05, 861.64it/s]
+reward: -2.39 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.64/6.17, grad norm= 60.66, loss_value= 362.98, loss_actor= 15.39, target value: -16.75:  56%|#####6    | 5600/10000 [00:08<00:05, 861.64it/s]
+reward: -2.39 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.64/6.17, grad norm= 60.66, loss_value= 362.98, loss_actor= 15.39, target value: -16.75:  64%|######4   | 6400/10000 [00:09<00:05, 640.70it/s]
+reward: -0.18 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-1.78/5.68, grad norm= 140.91, loss_value= 263.30, loss_actor= 12.55, target value: -10.25:  64%|######4   | 6400/10000 [00:10<00:05, 640.70it/s]
+reward: -0.18 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-1.78/5.68, grad norm= 140.91, loss_value= 263.30, loss_actor= 12.55, target value: -10.25:  72%|#######2  | 7200/10000 [00:12<00:05, 493.25it/s]
+reward: -1.33 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.30/5.80, grad norm= 88.53, loss_value= 234.86, loss_actor= 10.93, target value: -14.15:  72%|#######2  | 7200/10000 [00:12<00:05, 493.25it/s]
+reward: -1.33 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.30/5.80, grad norm= 88.53, loss_value= 234.86, loss_actor= 10.93, target value: -14.15:  80%|########  | 8000/10000 [00:14<00:04, 425.82it/s]
+reward: -4.81 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.33/4.87, grad norm= 66.83, loss_value= 191.09, loss_actor= 17.33, target value: -15.26:  80%|########  | 8000/10000 [00:15<00:04, 425.82it/s]
+reward: -4.81 (r0 = -1.80), reward eval: reward: -0.00, reward normalized=-2.33/4.87, grad norm= 66.83, loss_value= 191.09, loss_actor= 17.33, target value: -15.26:  88%|########8 | 8800/10000 [00:16<00:03, 389.80it/s]
+reward: -5.27 (r0 = -1.80), reward eval: reward: -5.60, reward normalized=-2.75/5.32, grad norm= 92.09, loss_value= 224.51, loss_actor= 14.97, target value: -18.42:  88%|########8 | 8800/10000 [00:19<00:03, 389.80it/s]
+reward: -5.27 (r0 = -1.80), reward eval: reward: -5.60, reward normalized=-2.75/5.32, grad norm= 92.09, loss_value= 224.51, loss_actor= 14.97, target value: -18.42:  96%|#########6| 9600/10000 [00:21<00:01, 288.58it/s]
+reward: -4.15 (r0 = -1.80), reward eval: reward: -5.60, reward normalized=-2.69/4.98, grad norm= 116.94, loss_value= 181.23, loss_actor= 15.28, target value: -19.73:  96%|#########6| 9600/10000 [00:22<00:01, 288.58it/s]
+reward: -4.15 (r0 = -1.80), reward eval: reward: -5.60, reward normalized=-2.69/4.98, grad norm= 116.94, loss_value= 181.23, loss_actor= 15.28, target value: -19.73: : 10400it [00:24, 267.97it/s]
+reward: -4.63 (r0 = -1.80), reward eval: reward: -5.60, reward normalized=-3.42/4.15, grad norm= 91.74, loss_value= 184.88, loss_actor= 23.50, target value: -23.91: : 10400it [00:25, 267.97it/s]
 
@@ -1621,7 +1621,7 @@

Next Steps
[Feature] Distpatch IQL loss module.)

  • Allowing flexible TensorDict keys.

  • -

    Total running time of the script: ( 0 minutes 40.968 seconds)

    +

    Total running time of the script: ( 0 minutes 40.982 seconds)

    The last thing we need to build the example application is the LibTorch
diff --git a/advanced/dynamic_quantization_tutorial.html b/advanced/dynamic_quantization_tutorial.html
index 1fa4d112f3..f31b82e1f2 100644
--- a/advanced/dynamic_quantization_tutorial.html
+++ b/advanced/dynamic_quantization_tutorial.html
@@ -837,9 +837,9 @@

    4. Test dynamic quantization
    loss: 5.167
    -elapsed time (seconds): 203.1
    +elapsed time (seconds): 203.8
     loss: 5.168
    -elapsed time (seconds): 112.0
    +elapsed time (seconds): 113.0
     

    Running this locally on a MacBook Pro, without quantization, inference takes about 200 seconds,
@@ -851,7 +851,7 @@

    Conclusionhere if you have any.

    -

    Total running time of the script: ( 5 minutes 23.825 seconds)

    +

    Total running time of the script: ( 5 minutes 25.353 seconds)

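    The hunks above only update the measured timings, so for context here is a minimal, hedged sketch of the dynamic quantization step that produces the roughly 2x speed-up reported there (elapsed time about 203.8 s without quantization vs. about 113.0 s with it). The ``TinyLM`` module and its sizes are hypothetical stand-ins rather than the tutorial's word-language model; only ``torch.ao.quantization.quantize_dynamic`` with the ``{nn.LSTM, nn.Linear}`` module set and ``torch.qint8`` follows the tutorial.

    .. code-block:: python

        # Sketch only: dynamically quantize the LSTM and Linear layers of a small
        # language-model-shaped module, then run inference with the quantized copy.
        import torch
        import torch.nn as nn

        class TinyLM(nn.Module):  # hypothetical stand-in, not the tutorial's model
            def __init__(self, vocab=1000, emb=128, hidden=256):
                super().__init__()
                self.embed = nn.Embedding(vocab, emb)
                self.lstm = nn.LSTM(emb, hidden, batch_first=True)
                self.head = nn.Linear(hidden, vocab)

            def forward(self, tokens):
                x = self.embed(tokens)
                out, _ = self.lstm(x)
                return self.head(out)

        model = TinyLM().eval()
        # Weights of nn.LSTM / nn.Linear are converted to int8; activations are
        # quantized on the fly at inference time, which is where the speed-up comes from.
        qmodel = torch.ao.quantization.quantize_dynamic(
            model, {nn.LSTM, nn.Linear}, dtype=torch.qint8
        )

        tokens = torch.randint(0, 1000, (1, 32))
        with torch.no_grad():
            print(qmodel(tokens).shape)  # same output shape, smaller and faster model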
    @@ -2121,7 +2128,7 @@

    Evaluating the results
    Acknowledgments

    We thank the TorchX team (in particular Kiuk Chung and Tristan Rice) for their help with integrating TorchX with Ax.

    -

    Total running time of the script: ( 16 minutes 15.239 seconds)

    +

    Total running time of the script: ( 16 minutes 58.398 seconds)

    5000 5% (0m 34s) 2.2208 Horigome / Japanese ✓
    -10000 10% (1m 8s) 1.6752 Miazga / Japanese ✗ (Polish)
    -15000 15% (1m 44s) 0.1778 Yukhvidov / Russian ✓
    -20000 20% (2m 19s) 1.5856 Mclaughlin / Irish ✗ (Scottish)
    -25000 25% (2m 54s) 0.6552 Banh / Vietnamese ✓
    -30000 30% (3m 29s) 1.5547 Machado / Japanese ✗ (Portuguese)
    -35000 35% (4m 4s) 0.0168 Fotopoulos / Greek ✓
    -40000 40% (4m 39s) 1.1464 Quirke / Irish ✓
    -45000 45% (5m 13s) 1.7532 Reier / French ✗ (German)
    -50000 50% (5m 48s) 0.8413 Hou / Chinese ✓
    -55000 55% (6m 23s) 0.8587 Duan / Vietnamese ✗ (Chinese)
    -60000 60% (6m 58s) 0.2047 Giang / Vietnamese ✓
    -65000 65% (7m 32s) 2.5534 Cober / French ✗ (Czech)
    -70000 70% (8m 7s) 1.5163 Mateus / Arabic ✗ (Portuguese)
    -75000 75% (8m 42s) 0.2217 Hamilton / Scottish ✓
    -80000 80% (9m 16s) 0.4456 Maessen / Dutch ✓
    -85000 85% (9m 51s) 0.0239 Gan / Chinese ✓
    -90000 90% (10m 26s) 0.0521 Bellomi / Italian ✓
    -95000 95% (11m 0s) 0.0867 Vozgov / Russian ✓
    -100000 100% (11m 35s) 0.2730 Tong / Vietnamese ✓
    +10000 10% (1m 10s) 1.6752 Miazga / Japanese ✗ (Polish)
    +15000 15% (1m 46s) 0.1778 Yukhvidov / Russian ✓
    +20000 20% (2m 21s) 1.5856 Mclaughlin / Irish ✗ (Scottish)
    +25000 25% (2m 57s) 0.6552 Banh / Vietnamese ✓
    +30000 30% (3m 32s) 1.5547 Machado / Japanese ✗ (Portuguese)
    +35000 35% (4m 8s) 0.0168 Fotopoulos / Greek ✓
    +40000 40% (4m 43s) 1.1464 Quirke / Irish ✓
    +45000 45% (5m 19s) 1.7532 Reier / French ✗ (German)
    +50000 50% (5m 54s) 0.8413 Hou / Chinese ✓
    +55000 55% (6m 29s) 0.8587 Duan / Vietnamese ✗ (Chinese)
    +60000 60% (7m 5s) 0.2047 Giang / Vietnamese ✓
    +65000 65% (7m 41s) 2.5534 Cober / French ✗ (Czech)
    +70000 70% (8m 16s) 1.5163 Mateus / Arabic ✗ (Portuguese)
    +75000 75% (8m 51s) 0.2217 Hamilton / Scottish ✓
    +80000 80% (9m 27s) 0.4456 Maessen / Dutch ✓
    +85000 85% (10m 2s) 0.0239 Gan / Chinese ✓
    +90000 90% (10m 37s) 0.0521 Bellomi / Italian ✓
    +95000 95% (11m 13s) 0.0867 Vozgov / Russian ✓
    +100000 100% (11m 49s) 0.2730 Tong / Vietnamese ✓
     
    @@ -888,7 +888,7 @@

    Plotting the Resultsplt.plot(all_losses) -char rnn classification tutorial
    [<matplotlib.lines.Line2D object at 0x7f1eae0199f0>]
    +char rnn classification tutorial
    [<matplotlib.lines.Line2D object at 0x7f4da778fca0>]
     
    @@ -1026,7 +1026,7 @@

    ExercisesTotal running time of the script: ( 11 minutes 48.189 seconds)

    +

    Total running time of the script: ( 12 minutes 1.675 seconds)

    -

    -char rnn generation tutorial
    [<matplotlib.lines.Line2D object at 0x7f01b8758dc0>]
    +char rnn generation tutorial
    [<matplotlib.lines.Line2D object at 0x7fc284510040>]
     
    @@ -922,7 +922,7 @@

    ExercisesTotal running time of the script: ( 13 minutes 11.262 seconds)

    +

    Total running time of the script: ( 13 minutes 46.615 seconds)