From e7c36568d9a2b1749a3b79772e0e22551860678a Mon Sep 17 00:00:00 2001 From: Arne Symons Date: Tue, 19 Nov 2024 16:42:44 +0100 Subject: [PATCH] update lab files to new accelerator1.yaml --- lab1/inputs/hardware/accelerator1.yaml | 124 ++++++++++++++++++ lab1/{ => inputs/mapping}/mapping.yaml | 0 .../workload}/resnet18_first_layer.onnx | Bin lab1/main.py | 9 +- lab2/inputs/hardware/accelerator1.yaml | 124 ++++++++++++++++++ lab2/{ => inputs/mapping}/mapping.yaml | 0 .../workload}/resnet18_first_layer.onnx | Bin lab2/main.py | 8 +- lab3/inputs/hardware/accelerator1.yaml | 16 +-- lab3/inputs/hardware/accelerator2.yaml | 16 +-- lab3/inputs/hardware/accelerator3.yaml | 16 +-- 11 files changed, 282 insertions(+), 31 deletions(-) create mode 100644 lab1/inputs/hardware/accelerator1.yaml rename lab1/{ => inputs/mapping}/mapping.yaml (100%) rename lab1/{ => inputs/workload}/resnet18_first_layer.onnx (100%) create mode 100644 lab2/inputs/hardware/accelerator1.yaml rename lab2/{ => inputs/mapping}/mapping.yaml (100%) rename lab2/{ => inputs/workload}/resnet18_first_layer.onnx (100%) diff --git a/lab1/inputs/hardware/accelerator1.yaml b/lab1/inputs/hardware/accelerator1.yaml new file mode 100644 index 000000000..895a97283 --- /dev/null +++ b/lab1/inputs/hardware/accelerator1.yaml @@ -0,0 +1,124 @@ +name: accelerator1 + +operational_array: + unit_energy: 0.04 # pJ + unit_area: 1 # unit + dimensions: [D1, D2] + sizes: [32, 32] + +memories: + rf_1B_I2: + size: 8 + r_bw: 8 + w_bw: 8 + r_cost: 0.095 # TODO + w_cost: 0.095 # TODO + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + auto_cost_extraction: False + operands: [I2] + ports: + - fh: w_port_1 + tl: r_port_1 + served_dimensions: [] # Fully unrolled over all multipliers + + rf_1B_I1: + size: 8 + r_bw: 8 + w_bw: 8 + r_cost: 0.095 # TODO + w_cost: 0.095 # TODO + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + auto_cost_extraction: False + operands: [I1] + ports: + - fh: w_port_1 + tl: r_port_1 + 
served_dimensions: [D2] # One RF per column + + rf_4B: + size: 32 + r_bw: 32 + w_bw: 32 + r_cost: 0.021 # TODO + w_cost: 0.021 # TODO + area: 0 + r_port: 2 + w_port: 2 + rw_port: 0 + latency: 1 + operands: [O] + ports: + - fh: w_port_1 + tl: r_port_1 + fl: w_port_2 + th: r_port_2 + served_dimensions: [D1] # One RF per row + + sram_16KB_I2: + size: 131072 + r_bw: 256 + w_bw: 256 + r_cost: 10.5 + w_cost: 12.8 + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + operands: [I2] + ports: + - fh: w_port_1 + tl: r_port_1 + served_dimensions: [D1, D2] + + sram_64KB_I1_O: + size: 524288 + r_bw: 512 + w_bw: 512 + r_cost: 19.6 + w_cost: 22.5 + area: 0 + r_port: 0 + w_port: 0 + rw_port: 2 + latency: 1 + operands: [I1, O] + ports: + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + fl: rw_port_2 + th: rw_port_2 + served_dimensions: [D1, D2] + + dram: + size: 4294967296 + r_bw: 64 + w_bw: 64 + r_cost: 100 # TODO + w_cost: 150 # TODO + area: 0 + r_port: 0 + w_port: 0 + rw_port: 1 + latency: 1 + operands: [I1, I2, O] + ports: + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + fl: rw_port_1 + th: rw_port_1 + served_dimensions: [D1, D2] diff --git a/lab1/mapping.yaml b/lab1/inputs/mapping/mapping.yaml similarity index 100% rename from lab1/mapping.yaml rename to lab1/inputs/mapping/mapping.yaml diff --git a/lab1/resnet18_first_layer.onnx b/lab1/inputs/workload/resnet18_first_layer.onnx similarity index 100% rename from lab1/resnet18_first_layer.onnx rename to lab1/inputs/workload/resnet18_first_layer.onnx diff --git a/lab1/main.py b/lab1/main.py index 70cb08dad..592a7b46f 100644 --- a/lab1/main.py +++ b/lab1/main.py @@ -1,5 +1,8 @@ import logging +import sys +import os +sys.path.insert(0, os.getcwd()) # For importing zigzag from zigzag.api import get_hardware_performance_zigzag from zigzag.visualization.results.plot_cme import ( bar_plot_cost_model_evaluations_breakdown, @@ -18,9 +21,9 @@ pickle_name = 
f"{experiment_id}-saved_list_of_cmes" # Define main input paths -accelerator = "zigzag/inputs/hardware/tpu_like.yaml" -workload = "lab1/resnet18_first_layer.onnx" -mapping = "lab1/mapping.yaml" +accelerator = "lab1/inputs/hardware/accelerator1.yaml" +workload = "lab1/inputs/workload/resnet18_first_layer.onnx" +mapping = "lab1/inputs/mapping/mapping.yaml" # Define other inputs of api call temporal_mapping_search_engine = "loma" diff --git a/lab2/inputs/hardware/accelerator1.yaml b/lab2/inputs/hardware/accelerator1.yaml new file mode 100644 index 000000000..895a97283 --- /dev/null +++ b/lab2/inputs/hardware/accelerator1.yaml @@ -0,0 +1,124 @@ +name: accelerator1 + +operational_array: + unit_energy: 0.04 # pJ + unit_area: 1 # unit + dimensions: [D1, D2] + sizes: [32, 32] + +memories: + rf_1B_I2: + size: 8 + r_bw: 8 + w_bw: 8 + r_cost: 0.095 # TODO + w_cost: 0.095 # TODO + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + auto_cost_extraction: False + operands: [I2] + ports: + - fh: w_port_1 + tl: r_port_1 + served_dimensions: [] # Fully unrolled over all multipliers + + rf_1B_I1: + size: 8 + r_bw: 8 + w_bw: 8 + r_cost: 0.095 # TODO + w_cost: 0.095 # TODO + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + auto_cost_extraction: False + operands: [I1] + ports: + - fh: w_port_1 + tl: r_port_1 + served_dimensions: [D2] # One RF per column + + rf_4B: + size: 32 + r_bw: 32 + w_bw: 32 + r_cost: 0.021 # TODO + w_cost: 0.021 # TODO + area: 0 + r_port: 2 + w_port: 2 + rw_port: 0 + latency: 1 + operands: [O] + ports: + - fh: w_port_1 + tl: r_port_1 + fl: w_port_2 + th: r_port_2 + served_dimensions: [D1] # One RF per row + + sram_16KB_I2: + size: 131072 + r_bw: 256 + w_bw: 256 + r_cost: 10.5 + w_cost: 12.8 + area: 0 + r_port: 1 + w_port: 1 + rw_port: 0 + latency: 1 + operands: [I2] + ports: + - fh: w_port_1 + tl: r_port_1 + served_dimensions: [D1, D2] + + sram_64KB_I1_O: + size: 524288 + r_bw: 512 + w_bw: 512 + r_cost: 19.6 + w_cost: 22.5 + area: 0 + r_port: 0 + 
w_port: 0 + rw_port: 2 + latency: 1 + operands: [I1, O] + ports: + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + fl: rw_port_2 + th: rw_port_2 + served_dimensions: [D1, D2] + + dram: + size: 4294967296 + r_bw: 64 + w_bw: 64 + r_cost: 100 # TODO + w_cost: 150 # TODO + area: 0 + r_port: 0 + w_port: 0 + rw_port: 1 + latency: 1 + operands: [I1, I2, O] + ports: + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + - fh: rw_port_1 + tl: rw_port_1 + fl: rw_port_1 + th: rw_port_1 + served_dimensions: [D1, D2] diff --git a/lab2/mapping.yaml b/lab2/inputs/mapping/mapping.yaml similarity index 100% rename from lab2/mapping.yaml rename to lab2/inputs/mapping/mapping.yaml diff --git a/lab2/resnet18_first_layer.onnx b/lab2/inputs/workload/resnet18_first_layer.onnx similarity index 100% rename from lab2/resnet18_first_layer.onnx rename to lab2/inputs/workload/resnet18_first_layer.onnx diff --git a/lab2/main.py b/lab2/main.py index fee13fdb6..74fdd7264 100644 --- a/lab2/main.py +++ b/lab2/main.py @@ -13,15 +13,15 @@ logging.basicConfig(level=logging_level, format=logging_format) # Define the experiment id and pickle name -hw_name = "tpu_like" +hw_name = "accelerator1" workload_name = "resnet18_first_layer" experiment_id = f"{hw_name}-{workload_name}" pickle_name = f"{experiment_id}-saved_list_of_cmes" # Define main input paths -accelerator = "zigzag/inputs/hardware/tpu_like.yaml" -workload = "lab2/resnet18_first_layer.onnx" -mapping = "lab2/mapping.yaml" +accelerator = "lab2/inputs/hardware/accelerator1.yaml" +workload = "lab2/inputs/workload/resnet18_first_layer.onnx" +mapping = "lab2/inputs/mapping/mapping.yaml" assert os.path.exists(mapping), "Copy mapping.yaml from lab1 and modify it for lab2." 
# Define other inputs of api call diff --git a/lab3/inputs/hardware/accelerator1.yaml b/lab3/inputs/hardware/accelerator1.yaml index fb810d7c0..895a97283 100644 --- a/lab3/inputs/hardware/accelerator1.yaml +++ b/lab3/inputs/hardware/accelerator1.yaml @@ -64,10 +64,10 @@ memories: sram_16KB_I2: size: 131072 - r_bw: 128 - w_bw: 128 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_bw: 256 + w_bw: 256 + r_cost: 10.5 + w_cost: 12.8 area: 0 r_port: 1 w_port: 1 @@ -83,8 +83,8 @@ memories: size: 524288 r_bw: 512 w_bw: 512 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_cost: 19.6 + w_cost: 22.5 area: 0 r_port: 0 w_port: 0 @@ -104,8 +104,8 @@ memories: size: 4294967296 r_bw: 64 w_bw: 64 - r_cost: 700 # TODO - w_cost: 750 # TODO + r_cost: 100 # TODO + w_cost: 150 # TODO area: 0 r_port: 0 w_port: 0 diff --git a/lab3/inputs/hardware/accelerator2.yaml b/lab3/inputs/hardware/accelerator2.yaml index 640d13ac4..f80cee25c 100644 --- a/lab3/inputs/hardware/accelerator2.yaml +++ b/lab3/inputs/hardware/accelerator2.yaml @@ -64,10 +64,10 @@ memories: sram_16KB_I2: size: 131072 - r_bw: 128 - w_bw: 128 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_bw: 256 + w_bw: 256 + r_cost: 10.5 + w_cost: 12.8 area: 0 r_port: 1 w_port: 1 @@ -83,8 +83,8 @@ memories: size: 524288 r_bw: 512 w_bw: 512 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_cost: 19.6 + w_cost: 22.5 area: 0 r_port: 0 w_port: 0 @@ -104,8 +104,8 @@ memories: size: 4294967296 r_bw: 64 w_bw: 64 - r_cost: 700 # TODO - w_cost: 750 # TODO + r_cost: 100 + w_cost: 150 area: 0 r_port: 0 w_port: 0 diff --git a/lab3/inputs/hardware/accelerator3.yaml b/lab3/inputs/hardware/accelerator3.yaml index 42693ffe9..1ac328e52 100644 --- a/lab3/inputs/hardware/accelerator3.yaml +++ b/lab3/inputs/hardware/accelerator3.yaml @@ -64,10 +64,10 @@ memories: sram_16KB_I2: size: 131072 - r_bw: 128 - w_bw: 128 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_bw: 256 + w_bw: 256 + r_cost: 10.5 + w_cost: 12.8 area: 0 r_port: 1 w_port: 1 @@ -83,8 
+83,8 @@ memories: size: 524288 r_bw: 512 w_bw: 512 - r_cost: 416.16 # TODO - w_cost: 378.4 # TODO + r_cost: 19.6 + w_cost: 22.5 area: 0 r_port: 0 w_port: 0 @@ -104,8 +104,8 @@ memories: size: 4294967296 r_bw: 64 w_bw: 64 - r_cost: 700 # TODO - w_cost: 750 # TODO + r_cost: 700 + w_cost: 750 area: 0 r_port: 0 w_port: 0